1. Create conda env with GPU
  2. Create data generator with augmentation
  3. Create simple model
  4. create advanced model with Bayesian Optimization for Hyperparameter tuning
In [ ]:
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import segmentation_models as sm
import glob
%reload_ext autoreload
%autoreload 2
In [ ]:
print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU')))
Num GPUs Available:  1
In [ ]:
from tensorflow.python.client import device_lib

device_lib.list_local_devices()
Out[ ]:
[name: "/device:CPU:0"
 device_type: "CPU"
 memory_limit: 268435456
 locality {
 }
 incarnation: 960528230496385992,
 name: "/device:GPU:0"
 device_type: "GPU"
 memory_limit: 9883877376
 locality {
   bus_id: 1
   links {
   }
 }
 incarnation: 5547680197840193138
 physical_device_desc: "device: 0, name: NVIDIA GeForce RTX 3080 Ti, pci bus id: 0000:01:00.0, compute capability: 8.6"]
In [ ]:
NUM_CLASSES = 34
IMAGE_SIZE = (256, 256, 3)
BATCH_SIZE = 18
In [ ]:
def display_batch_of_images(batch):
  import matplotlib.pyplot as plt

  for i in range(len(batch[0])):

    image = batch[0][i]

    # normalize to [0,1]
    image =  (image - np.min(image)) / (np.max(image) - np.min(image) + 1e-9)

    segmentation = tf.math.argmax(batch[1][i], axis=-1) # undo to_categorical

    segmentation = tf.cast(segmentation, tf.float32)
    segmentation = tf.multiply((255.0 / NUM_CLASSES), segmentation)
    segmentation = segmentation / 255.0

    plt.figure()

    plt.subplot(1, 2, 1)
    plt.imshow(image)
    plt.title('Image')

    plt.subplot(1, 2, 2)
    plt.imshow(segmentation, vmin=0, vmax=1)
    plt.title('Ground Truth Segmentation')

    plt.show()
In [ ]:
DATA_FOLDER = f"data"
In [ ]:
import albumentations as A
from data_generator import SegmentationDataGenerator

aug = A.Compose([
        A.VerticalFlip(p=0.5),              
        A.RandomRotate90(p=0.5),
        A.PixelDropout(p=0.01),
        A.OneOf([
            A.MotionBlur(p=0.6),
            A.Blur(p=0.4),
        ], p=0.6),
        A.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=20, p=0.75),
        A.OpticalDistortion(p=0.3),
        #A.GridDistortion(p=0.3),
        A.RandomBrightnessContrast(p=0.5)
    ]
)

train_data = SegmentationDataGenerator(
    image_directory = f"{DATA_FOLDER}/train/train2/*_img_*",
    segmentation_directory = f"{DATA_FOLDER}/train/train2/*_lbl_*",
    num_classes = NUM_CLASSES,
    batch_size = BATCH_SIZE,
    augmentation = aug
)
train_data_gen = train_data.get_data_generator()

val_data = SegmentationDataGenerator(
    image_directory = f"{DATA_FOLDER}/val/*_img_*",
    segmentation_directory = f"{DATA_FOLDER}/val/*_lbl_*",
    num_classes = NUM_CLASSES,
    batch_size = BATCH_SIZE,
    augmentation = False
)
val_data_gen = val_data.get_data_generator()
Indexing Image files...
Indexing Segmentation files...
Loaded 23520 images with 23520 segmentations
Indexing Image files...
Indexing Segmentation files...
Loaded 8544 images with 8544 segmentations

Print examples¶

In [ ]:
batch = train_data_gen.__next__()
display_batch_of_images(batch)
In [ ]:
batch = val_data_gen.__next__()
display_batch_of_images(batch)

Define Model¶

In [ ]:
def fcn_simple_no_border(input_height:int, input_width:int, input_channels:int, num_classes:int) -> tf.keras.Model:
    """
    Create a simple fcn model for semantic segmentation with 2 classes
    """
    model = tf.keras.Sequential()
    
    # (used to define input shape on the first layers)
    model.add(tf.keras.layers.Layer(input_shape=(input_height , input_width, input_channels)))
    
    model.add(tf.keras.layers.Convolution2D(filters=32, kernel_size=3, strides=(1, 1), padding='same', activation='relu'))
    model.add(tf.keras.layers.Convolution2D(filters=32, kernel_size=3, strides=(1, 1), padding='same', activation='relu'))
    model.add(tf.keras.layers.Convolution2D(filters=32, kernel_size=3, strides=(1, 1), padding='same', activation='relu'))
    model.add(tf.keras.layers.Convolution2D(filters=32, kernel_size=3, strides=(1, 1), padding='same', activation='relu'))

    model.add(tf.keras.layers.Convolution2D(filters=64, kernel_size=3, strides=(1, 1), padding='same', activation='relu'))
    model.add(tf.keras.layers.Convolution2D(filters=64, kernel_size=3, strides=(1, 1), padding='same', activation='relu'))
    model.add(tf.keras.layers.Convolution2D(filters=64, kernel_size=3, strides=(1, 1), padding='same', activation='relu'))
    model.add(tf.keras.layers.Convolution2D(filters=64, kernel_size=3, strides=(1, 1), padding='same', activation='relu'))

    model.add(tf.keras.layers.Convolution2D(filters=32, kernel_size=1, strides=1, padding='same', activation='relu'))
    model.add(tf.keras.layers.Convolution2D(filters=32, kernel_size=1, strides=1, padding='same', activation='relu'))
    model.add(tf.keras.layers.Convolution2D(filters=32, kernel_size=1, strides=1, padding='same', activation='relu'))
    model.add(tf.keras.layers.Convolution2D(filters=32, kernel_size=1, strides=1, padding='same', activation='relu'))
    
    model.add(tf.keras.layers.Convolution2D(filters=num_classes, kernel_size=3, strides=(1, 1), padding='same', activation='softmax'))
    
    return model
In [ ]:
#model = fcn_simple_no_border(IMAGE_SIZE[0], IMAGE_SIZE[1], IMAGE_SIZE[2], NUM_CLASSES)
In [ ]:
from unet import build_unet

model = build_unet(NUM_CLASSES, IMAGE_SIZE[0], IMAGE_SIZE[1], IMAGE_SIZE[2] )
In [ ]:
model.summary()
Model: "model"
__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to                     
==================================================================================================
input_1 (InputLayer)            [(None, 256, 256, 3) 0                                            
__________________________________________________________________________________________________
conv2d (Conv2D)                 (None, 256, 256, 16) 448         input_1[0][0]                    
__________________________________________________________________________________________________
batch_normalization (BatchNorma (None, 256, 256, 16) 64          conv2d[0][0]                     
__________________________________________________________________________________________________
activation (Activation)         (None, 256, 256, 16) 0           batch_normalization[0][0]        
__________________________________________________________________________________________________
dropout (Dropout)               (None, 256, 256, 16) 0           activation[0][0]                 
__________________________________________________________________________________________________
conv2d_1 (Conv2D)               (None, 256, 256, 16) 2320        dropout[0][0]                    
__________________________________________________________________________________________________
batch_normalization_1 (BatchNor (None, 256, 256, 16) 64          conv2d_1[0][0]                   
__________________________________________________________________________________________________
activation_1 (Activation)       (None, 256, 256, 16) 0           batch_normalization_1[0][0]      
__________________________________________________________________________________________________
dropout_1 (Dropout)             (None, 256, 256, 16) 0           activation_1[0][0]               
__________________________________________________________________________________________________
conv2d_2 (Conv2D)               (None, 256, 256, 16) 2320        dropout_1[0][0]                  
__________________________________________________________________________________________________
batch_normalization_2 (BatchNor (None, 256, 256, 16) 64          conv2d_2[0][0]                   
__________________________________________________________________________________________________
activation_2 (Activation)       (None, 256, 256, 16) 0           batch_normalization_2[0][0]      
__________________________________________________________________________________________________
dropout_2 (Dropout)             (None, 256, 256, 16) 0           activation_2[0][0]               
__________________________________________________________________________________________________
add (Add)                       (None, 256, 256, 16) 0           dropout_2[0][0]                  
                                                                 conv2d[0][0]                     
__________________________________________________________________________________________________
max_pooling2d (MaxPooling2D)    (None, 128, 128, 16) 0           add[0][0]                        
__________________________________________________________________________________________________
conv2d_3 (Conv2D)               (None, 128, 128, 32) 4640        max_pooling2d[0][0]              
__________________________________________________________________________________________________
batch_normalization_3 (BatchNor (None, 128, 128, 32) 128         conv2d_3[0][0]                   
__________________________________________________________________________________________________
activation_3 (Activation)       (None, 128, 128, 32) 0           batch_normalization_3[0][0]      
__________________________________________________________________________________________________
dropout_3 (Dropout)             (None, 128, 128, 32) 0           activation_3[0][0]               
__________________________________________________________________________________________________
conv2d_4 (Conv2D)               (None, 128, 128, 32) 9248        dropout_3[0][0]                  
__________________________________________________________________________________________________
batch_normalization_4 (BatchNor (None, 128, 128, 32) 128         conv2d_4[0][0]                   
__________________________________________________________________________________________________
activation_4 (Activation)       (None, 128, 128, 32) 0           batch_normalization_4[0][0]      
__________________________________________________________________________________________________
dropout_4 (Dropout)             (None, 128, 128, 32) 0           activation_4[0][0]               
__________________________________________________________________________________________________
conv2d_5 (Conv2D)               (None, 128, 128, 32) 9248        dropout_4[0][0]                  
__________________________________________________________________________________________________
batch_normalization_5 (BatchNor (None, 128, 128, 32) 128         conv2d_5[0][0]                   
__________________________________________________________________________________________________
activation_5 (Activation)       (None, 128, 128, 32) 0           batch_normalization_5[0][0]      
__________________________________________________________________________________________________
dropout_5 (Dropout)             (None, 128, 128, 32) 0           activation_5[0][0]               
__________________________________________________________________________________________________
add_1 (Add)                     (None, 128, 128, 32) 0           dropout_5[0][0]                  
                                                                 conv2d_3[0][0]                   
__________________________________________________________________________________________________
max_pooling2d_1 (MaxPooling2D)  (None, 64, 64, 32)   0           add_1[0][0]                      
__________________________________________________________________________________________________
conv2d_6 (Conv2D)               (None, 64, 64, 64)   18496       max_pooling2d_1[0][0]            
__________________________________________________________________________________________________
batch_normalization_6 (BatchNor (None, 64, 64, 64)   256         conv2d_6[0][0]                   
__________________________________________________________________________________________________
activation_6 (Activation)       (None, 64, 64, 64)   0           batch_normalization_6[0][0]      
__________________________________________________________________________________________________
dropout_6 (Dropout)             (None, 64, 64, 64)   0           activation_6[0][0]               
__________________________________________________________________________________________________
conv2d_7 (Conv2D)               (None, 64, 64, 64)   36928       dropout_6[0][0]                  
__________________________________________________________________________________________________
batch_normalization_7 (BatchNor (None, 64, 64, 64)   256         conv2d_7[0][0]                   
__________________________________________________________________________________________________
activation_7 (Activation)       (None, 64, 64, 64)   0           batch_normalization_7[0][0]      
__________________________________________________________________________________________________
dropout_7 (Dropout)             (None, 64, 64, 64)   0           activation_7[0][0]               
__________________________________________________________________________________________________
conv2d_8 (Conv2D)               (None, 64, 64, 64)   36928       dropout_7[0][0]                  
__________________________________________________________________________________________________
batch_normalization_8 (BatchNor (None, 64, 64, 64)   256         conv2d_8[0][0]                   
__________________________________________________________________________________________________
activation_8 (Activation)       (None, 64, 64, 64)   0           batch_normalization_8[0][0]      
__________________________________________________________________________________________________
dropout_8 (Dropout)             (None, 64, 64, 64)   0           activation_8[0][0]               
__________________________________________________________________________________________________
add_2 (Add)                     (None, 64, 64, 64)   0           dropout_8[0][0]                  
                                                                 conv2d_6[0][0]                   
__________________________________________________________________________________________________
max_pooling2d_2 (MaxPooling2D)  (None, 32, 32, 64)   0           add_2[0][0]                      
__________________________________________________________________________________________________
conv2d_9 (Conv2D)               (None, 32, 32, 128)  73856       max_pooling2d_2[0][0]            
__________________________________________________________________________________________________
batch_normalization_9 (BatchNor (None, 32, 32, 128)  512         conv2d_9[0][0]                   
__________________________________________________________________________________________________
activation_9 (Activation)       (None, 32, 32, 128)  0           batch_normalization_9[0][0]      
__________________________________________________________________________________________________
dropout_9 (Dropout)             (None, 32, 32, 128)  0           activation_9[0][0]               
__________________________________________________________________________________________________
conv2d_10 (Conv2D)              (None, 32, 32, 128)  147584      dropout_9[0][0]                  
__________________________________________________________________________________________________
batch_normalization_10 (BatchNo (None, 32, 32, 128)  512         conv2d_10[0][0]                  
__________________________________________________________________________________________________
activation_10 (Activation)      (None, 32, 32, 128)  0           batch_normalization_10[0][0]     
__________________________________________________________________________________________________
dropout_10 (Dropout)            (None, 32, 32, 128)  0           activation_10[0][0]              
__________________________________________________________________________________________________
conv2d_11 (Conv2D)              (None, 32, 32, 128)  147584      dropout_10[0][0]                 
__________________________________________________________________________________________________
batch_normalization_11 (BatchNo (None, 32, 32, 128)  512         conv2d_11[0][0]                  
__________________________________________________________________________________________________
activation_11 (Activation)      (None, 32, 32, 128)  0           batch_normalization_11[0][0]     
__________________________________________________________________________________________________
dropout_11 (Dropout)            (None, 32, 32, 128)  0           activation_11[0][0]              
__________________________________________________________________________________________________
add_3 (Add)                     (None, 32, 32, 128)  0           dropout_11[0][0]                 
                                                                 conv2d_9[0][0]                   
__________________________________________________________________________________________________
max_pooling2d_3 (MaxPooling2D)  (None, 16, 16, 128)  0           add_3[0][0]                      
__________________________________________________________________________________________________
conv2d_12 (Conv2D)              (None, 16, 16, 256)  295168      max_pooling2d_3[0][0]            
__________________________________________________________________________________________________
batch_normalization_12 (BatchNo (None, 16, 16, 256)  1024        conv2d_12[0][0]                  
__________________________________________________________________________________________________
activation_12 (Activation)      (None, 16, 16, 256)  0           batch_normalization_12[0][0]     
__________________________________________________________________________________________________
dropout_12 (Dropout)            (None, 16, 16, 256)  0           activation_12[0][0]              
__________________________________________________________________________________________________
conv2d_13 (Conv2D)              (None, 16, 16, 256)  590080      dropout_12[0][0]                 
__________________________________________________________________________________________________
batch_normalization_13 (BatchNo (None, 16, 16, 256)  1024        conv2d_13[0][0]                  
__________________________________________________________________________________________________
activation_13 (Activation)      (None, 16, 16, 256)  0           batch_normalization_13[0][0]     
__________________________________________________________________________________________________
dropout_13 (Dropout)            (None, 16, 16, 256)  0           activation_13[0][0]              
__________________________________________________________________________________________________
conv2d_14 (Conv2D)              (None, 16, 16, 256)  590080      dropout_13[0][0]                 
__________________________________________________________________________________________________
batch_normalization_14 (BatchNo (None, 16, 16, 256)  1024        conv2d_14[0][0]                  
__________________________________________________________________________________________________
activation_14 (Activation)      (None, 16, 16, 256)  0           batch_normalization_14[0][0]     
__________________________________________________________________________________________________
dropout_14 (Dropout)            (None, 16, 16, 256)  0           activation_14[0][0]              
__________________________________________________________________________________________________
add_4 (Add)                     (None, 16, 16, 256)  0           dropout_14[0][0]                 
                                                                 conv2d_12[0][0]                  
__________________________________________________________________________________________________
max_pooling2d_4 (MaxPooling2D)  (None, 8, 8, 256)    0           add_4[0][0]                      
__________________________________________________________________________________________________
conv2d_15 (Conv2D)              (None, 8, 8, 512)    131584      max_pooling2d_4[0][0]            
__________________________________________________________________________________________________
batch_normalization_15 (BatchNo (None, 8, 8, 512)    2048        conv2d_15[0][0]                  
__________________________________________________________________________________________________
activation_15 (Activation)      (None, 8, 8, 512)    0           batch_normalization_15[0][0]     
__________________________________________________________________________________________________
dropout_15 (Dropout)            (None, 8, 8, 512)    0           activation_15[0][0]              
__________________________________________________________________________________________________
conv2d_16 (Conv2D)              (None, 8, 8, 512)    262656      dropout_15[0][0]                 
__________________________________________________________________________________________________
batch_normalization_16 (BatchNo (None, 8, 8, 512)    2048        conv2d_16[0][0]                  
__________________________________________________________________________________________________
activation_16 (Activation)      (None, 8, 8, 512)    0           batch_normalization_16[0][0]     
__________________________________________________________________________________________________
dropout_16 (Dropout)            (None, 8, 8, 512)    0           activation_16[0][0]              
__________________________________________________________________________________________________
conv2d_17 (Conv2D)              (None, 8, 8, 512)    262656      dropout_16[0][0]                 
__________________________________________________________________________________________________
batch_normalization_17 (BatchNo (None, 8, 8, 512)    2048        conv2d_17[0][0]                  
__________________________________________________________________________________________________
activation_17 (Activation)      (None, 8, 8, 512)    0           batch_normalization_17[0][0]     
__________________________________________________________________________________________________
add_5 (Add)                     (None, 8, 8, 512)    0           activation_17[0][0]              
                                                                 conv2d_15[0][0]                  
__________________________________________________________________________________________________
dropout_17 (Dropout)            (None, 8, 8, 512)    0           add_5[0][0]                      
__________________________________________________________________________________________________
conv2d_transpose (Conv2DTranspo (None, 16, 16, 512)  1049088     dropout_17[0][0]                 
__________________________________________________________________________________________________
concatenate (Concatenate)       (None, 16, 16, 768)  0           conv2d_transpose[0][0]           
                                                                 add_4[0][0]                      
__________________________________________________________________________________________________
conv2d_18 (Conv2D)              (None, 16, 16, 512)  3539456     concatenate[0][0]                
__________________________________________________________________________________________________
batch_normalization_18 (BatchNo (None, 16, 16, 512)  2048        conv2d_18[0][0]                  
__________________________________________________________________________________________________
activation_18 (Activation)      (None, 16, 16, 512)  0           batch_normalization_18[0][0]     
__________________________________________________________________________________________________
dropout_18 (Dropout)            (None, 16, 16, 512)  0           activation_18[0][0]              
__________________________________________________________________________________________________
conv2d_19 (Conv2D)              (None, 16, 16, 512)  2359808     dropout_18[0][0]                 
__________________________________________________________________________________________________
batch_normalization_19 (BatchNo (None, 16, 16, 512)  2048        conv2d_19[0][0]                  
__________________________________________________________________________________________________
activation_19 (Activation)      (None, 16, 16, 512)  0           batch_normalization_19[0][0]     
__________________________________________________________________________________________________
dropout_19 (Dropout)            (None, 16, 16, 512)  0           activation_19[0][0]              
__________________________________________________________________________________________________
conv2d_20 (Conv2D)              (None, 16, 16, 512)  2359808     dropout_19[0][0]                 
__________________________________________________________________________________________________
batch_normalization_20 (BatchNo (None, 16, 16, 512)  2048        conv2d_20[0][0]                  
__________________________________________________________________________________________________
activation_20 (Activation)      (None, 16, 16, 512)  0           batch_normalization_20[0][0]     
__________________________________________________________________________________________________
add_6 (Add)                     (None, 16, 16, 512)  0           activation_20[0][0]              
                                                                 conv2d_18[0][0]                  
__________________________________________________________________________________________________
dropout_20 (Dropout)            (None, 16, 16, 512)  0           add_6[0][0]                      
__________________________________________________________________________________________________
conv2d_transpose_1 (Conv2DTrans (None, 32, 32, 256)  524544      dropout_20[0][0]                 
__________________________________________________________________________________________________
concatenate_1 (Concatenate)     (None, 32, 32, 384)  0           conv2d_transpose_1[0][0]         
                                                                 add_3[0][0]                      
__________________________________________________________________________________________________
conv2d_21 (Conv2D)              (None, 32, 32, 256)  884992      concatenate_1[0][0]              
__________________________________________________________________________________________________
batch_normalization_21 (BatchNo (None, 32, 32, 256)  1024        conv2d_21[0][0]                  
__________________________________________________________________________________________________
activation_21 (Activation)      (None, 32, 32, 256)  0           batch_normalization_21[0][0]     
__________________________________________________________________________________________________
dropout_21 (Dropout)            (None, 32, 32, 256)  0           activation_21[0][0]              
__________________________________________________________________________________________________
conv2d_22 (Conv2D)              (None, 32, 32, 256)  590080      dropout_21[0][0]                 
__________________________________________________________________________________________________
batch_normalization_22 (BatchNo (None, 32, 32, 256)  1024        conv2d_22[0][0]                  
__________________________________________________________________________________________________
activation_22 (Activation)      (None, 32, 32, 256)  0           batch_normalization_22[0][0]     
__________________________________________________________________________________________________
dropout_22 (Dropout)            (None, 32, 32, 256)  0           activation_22[0][0]              
__________________________________________________________________________________________________
conv2d_23 (Conv2D)              (None, 32, 32, 256)  590080      dropout_22[0][0]                 
__________________________________________________________________________________________________
batch_normalization_23 (BatchNo (None, 32, 32, 256)  1024        conv2d_23[0][0]                  
__________________________________________________________________________________________________
activation_23 (Activation)      (None, 32, 32, 256)  0           batch_normalization_23[0][0]     
__________________________________________________________________________________________________
add_7 (Add)                     (None, 32, 32, 256)  0           activation_23[0][0]              
                                                                 conv2d_21[0][0]                  
__________________________________________________________________________________________________
dropout_23 (Dropout)            (None, 32, 32, 256)  0           add_7[0][0]                      
__________________________________________________________________________________________________
conv2d_transpose_2 (Conv2DTrans (None, 64, 64, 64)   65600       dropout_23[0][0]                 
__________________________________________________________________________________________________
concatenate_2 (Concatenate)     (None, 64, 64, 128)  0           conv2d_transpose_2[0][0]         
                                                                 add_2[0][0]                      
__________________________________________________________________________________________________
conv2d_24 (Conv2D)              (None, 64, 64, 64)   73792       concatenate_2[0][0]              
__________________________________________________________________________________________________
batch_normalization_24 (BatchNo (None, 64, 64, 64)   256         conv2d_24[0][0]                  
__________________________________________________________________________________________________
activation_24 (Activation)      (None, 64, 64, 64)   0           batch_normalization_24[0][0]     
__________________________________________________________________________________________________
dropout_24 (Dropout)            (None, 64, 64, 64)   0           activation_24[0][0]              
__________________________________________________________________________________________________
conv2d_25 (Conv2D)              (None, 64, 64, 64)   36928       dropout_24[0][0]                 
__________________________________________________________________________________________________
batch_normalization_25 (BatchNo (None, 64, 64, 64)   256         conv2d_25[0][0]                  
__________________________________________________________________________________________________
activation_25 (Activation)      (None, 64, 64, 64)   0           batch_normalization_25[0][0]     
__________________________________________________________________________________________________
dropout_25 (Dropout)            (None, 64, 64, 64)   0           activation_25[0][0]              
__________________________________________________________________________________________________
conv2d_26 (Conv2D)              (None, 64, 64, 64)   36928       dropout_25[0][0]                 
__________________________________________________________________________________________________
batch_normalization_26 (BatchNo (None, 64, 64, 64)   256         conv2d_26[0][0]                  
__________________________________________________________________________________________________
activation_26 (Activation)      (None, 64, 64, 64)   0           batch_normalization_26[0][0]     
__________________________________________________________________________________________________
add_8 (Add)                     (None, 64, 64, 64)   0           activation_26[0][0]              
                                                                 conv2d_24[0][0]                  
__________________________________________________________________________________________________
dropout_26 (Dropout)            (None, 64, 64, 64)   0           add_8[0][0]                      
__________________________________________________________________________________________________
conv2d_transpose_3 (Conv2DTrans (None, 128, 128, 32) 8224        dropout_26[0][0]                 
__________________________________________________________________________________________________
concatenate_3 (Concatenate)     (None, 128, 128, 64) 0           conv2d_transpose_3[0][0]         
                                                                 add_1[0][0]                      
__________________________________________________________________________________________________
conv2d_27 (Conv2D)              (None, 128, 128, 32) 18464       concatenate_3[0][0]              
__________________________________________________________________________________________________
batch_normalization_27 (BatchNo (None, 128, 128, 32) 128         conv2d_27[0][0]                  
__________________________________________________________________________________________________
activation_27 (Activation)      (None, 128, 128, 32) 0           batch_normalization_27[0][0]     
__________________________________________________________________________________________________
dropout_27 (Dropout)            (None, 128, 128, 32) 0           activation_27[0][0]              
__________________________________________________________________________________________________
conv2d_28 (Conv2D)              (None, 128, 128, 32) 9248        dropout_27[0][0]                 
__________________________________________________________________________________________________
batch_normalization_28 (BatchNo (None, 128, 128, 32) 128         conv2d_28[0][0]                  
__________________________________________________________________________________________________
activation_28 (Activation)      (None, 128, 128, 32) 0           batch_normalization_28[0][0]     
__________________________________________________________________________________________________
dropout_28 (Dropout)            (None, 128, 128, 32) 0           activation_28[0][0]              
__________________________________________________________________________________________________
conv2d_29 (Conv2D)              (None, 128, 128, 32) 9248        dropout_28[0][0]                 
__________________________________________________________________________________________________
batch_normalization_29 (BatchNo (None, 128, 128, 32) 128         conv2d_29[0][0]                  
__________________________________________________________________________________________________
activation_29 (Activation)      (None, 128, 128, 32) 0           batch_normalization_29[0][0]     
__________________________________________________________________________________________________
add_9 (Add)                     (None, 128, 128, 32) 0           activation_29[0][0]              
                                                                 conv2d_27[0][0]                  
__________________________________________________________________________________________________
dropout_29 (Dropout)            (None, 128, 128, 32) 0           add_9[0][0]                      
__________________________________________________________________________________________________
conv2d_transpose_4 (Conv2DTrans (None, 256, 256, 16) 2064        dropout_29[0][0]                 
__________________________________________________________________________________________________
concatenate_4 (Concatenate)     (None, 256, 256, 32) 0           conv2d_transpose_4[0][0]         
                                                                 add[0][0]                        
__________________________________________________________________________________________________
conv2d_30 (Conv2D)              (None, 256, 256, 32) 9248        concatenate_4[0][0]              
__________________________________________________________________________________________________
batch_normalization_30 (BatchNo (None, 256, 256, 32) 128         conv2d_30[0][0]                  
__________________________________________________________________________________________________
activation_30 (Activation)      (None, 256, 256, 32) 0           batch_normalization_30[0][0]     
__________________________________________________________________________________________________
dropout_30 (Dropout)            (None, 256, 256, 32) 0           activation_30[0][0]              
__________________________________________________________________________________________________
conv2d_31 (Conv2D)              (None, 256, 256, 32) 9248        dropout_30[0][0]                 
__________________________________________________________________________________________________
batch_normalization_31 (BatchNo (None, 256, 256, 32) 128         conv2d_31[0][0]                  
__________________________________________________________________________________________________
activation_31 (Activation)      (None, 256, 256, 32) 0           batch_normalization_31[0][0]     
__________________________________________________________________________________________________
dropout_31 (Dropout)            (None, 256, 256, 32) 0           activation_31[0][0]              
__________________________________________________________________________________________________
conv2d_32 (Conv2D)              (None, 256, 256, 32) 9248        dropout_31[0][0]                 
__________________________________________________________________________________________________
batch_normalization_32 (BatchNo (None, 256, 256, 32) 128         conv2d_32[0][0]                  
__________________________________________________________________________________________________
activation_32 (Activation)      (None, 256, 256, 32) 0           batch_normalization_32[0][0]     
__________________________________________________________________________________________________
add_10 (Add)                    (None, 256, 256, 32) 0           activation_32[0][0]              
                                                                 conv2d_30[0][0]                  
__________________________________________________________________________________________________
dropout_32 (Dropout)            (None, 256, 256, 32) 0           add_10[0][0]                     
__________________________________________________________________________________________________
conv2d_33 (Conv2D)              (None, 256, 256, 34) 1122        dropout_32[0][0]                 
==================================================================================================
Total params: 14,831,890
Trainable params: 14,820,466
Non-trainable params: 11,424
__________________________________________________________________________________________________

Calculate Class weights for loss¶

In [ ]:
#tf.keras.utils.plot_model(model)
In [ ]:
x, y = train_data_gen.__next__()

print(x.shape)
print(y.shape)
(18, 256, 256, 3)
(18, 256, 256, 34)
In [ ]:
_, y = train_data_gen.__next__()

total_values = y.shape[1] * y.shape[2] * y.shape[3]

class_counts = np.zeros(NUM_CLASSES)

n = 5

for _ in range(n):

    _, y = train_data_gen.__next__()

    #undo keras.to_categorical
    y = np.argmax(y, axis=-1)

    indexes = np.unique(y, return_counts=True)[0]
    counts = np.unique(y, return_counts=True)[1]
    for index, count in zip(indexes, counts):
        class_counts[index-1] += count

    
class_weights = np.asarray(class_counts)
class_weights = np.divide(class_weights, total_values*n)
class_weights = np.sum(class_weights) - class_weights
class_weights = np.divide(class_weights, np.sum(class_weights))
    
print(f"Class weights: {class_weights}")
print(np.sum(class_weights))
Class weights: [0.02827627 0.029024   0.02983875 0.02995079 0.03028599 0.02978693
 0.01962276 0.0277229  0.03012128 0.03022372 0.02618636 0.03025748
 0.02977189 0.03030303 0.02978479 0.03030303 0.03006144 0.03030303
 0.03025451 0.0301841  0.02699735 0.0301615  0.02953378 0.02988423
 0.03025124 0.02859819 0.03030303 0.03030303 0.03030303 0.03029475
 0.03030303 0.03028374 0.03021704 0.03030303]
0.9999999999999999
In [ ]:
import wandb
from wandb.keras import WandbMetricsLogger, WandbModelCheckpoint
In [ ]:
LEARNING_RATE = 0.001
STEPS_PER_EPOCH = len(train_data) // BATCH_SIZE

lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate=LEARNING_RATE,
    decay_steps=STEPS_PER_EPOCH * 4,
    decay_rate=0.95)
In [ ]:
!set WANDB_NOTEBOOK_NAME = "01_simple_model.ipynb"
In [ ]:
# Start a run, tracking hyperparameters
wandb.init(
    # set the wandb project where this run will be logged
    project="TSM-CompVis-Segmentation",

    # track hyperparameters and run metadata with wandb.config
    config={
        "optimizer": "Adam",
        "learning_rate": f"{LEARNING_RATE}",
        "loss": "total_loss",
        "batch_size": BATCH_SIZE,
        "steps_per_epoch": STEPS_PER_EPOCH,
        "metric": "Accuracy, F1, Dice"
    }
)
Finishing last run (ID:sbai6ib7) before initializing another...

Run history:


batch/batch_step▁▁▂▂▂▂▃▃▃▃▄▄▄▅▅▅▅▆▆▆▆▇▇▇▇██
batch/categorical_accuracy█▅▅▅▅▅▅█▄▄▄▄▄▅▆▄▅▄▄▅▅▁▅▅▅▅▅
batch/learning_rate██▇▇▇▇▆▆▆▆▅▅▅▄▄▄▄▃▃▃▃▂▂▂▂▁▁
batch/loss▁▅▅▅▅▅▅▃▅▅▅▅▅▅▆▆▅▅▅▅▅█▅▅▅▅▅
epoch/categorical_accuracy█▁▃
epoch/epoch▁▅█
epoch/learning_rate█▄▁
epoch/loss▁██
epoch/val_categorical_accuracy█▃▁
epoch/val_loss▁▆█

Run summary:


batch/batch_step5200
batch/categorical_accuracy0.78928
batch/learning_rate0.00095
batch/loss0.81558
epoch/categorical_accuracy0.7906
epoch/epoch2
epoch/learning_rate0.00096
epoch/loss0.80872
epoch/val_categorical_accuracy0.67855
epoch/val_loss1.17475

View run ruby-salad-64 at: https://wandb.ai/cyfi/TSM-CompVis-Segmentation/runs/sbai6ib7
Synced 6 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)
Find logs at: .\wandb\run-20231223_223230-sbai6ib7\logs
Successfully finished last run (ID:sbai6ib7). Initializing new run:
Tracking run with wandb version 0.16.1
Run data is saved locally in d:\dev\TSM_CompVis-Segmantic-Segmentation\wandb\run-20231223_225251-3ic5p82q
Syncing run autumn-elevator-65 to Weights & Biases (docs)
View project at https://wandb.ai/cyfi/TSM-CompVis-Segmentation
View run at https://wandb.ai/cyfi/TSM-CompVis-Segmentation/runs/3ic5p82q
Out[ ]:
In [ ]:
MODEL_NAME = wandb.run.name
In [ ]:
 
In [ ]:
# Define callbacks.

checkpoint_cb = tf.keras.callbacks.ModelCheckpoint(
    "training/"+MODEL_NAME+"/checkpoints/epoch_{epoch}_val_loss_{val_loss}.keras",
    monitor='val_loss',
    save_best_only=True
)

tensorboard_cb = tf.keras.callbacks.TensorBoard(
    log_dir=f"training/{MODEL_NAME}/tensorboard",
    histogram_freq=1,
    write_graph=True
)
In [ ]:
metrics = [
        tf.keras.metrics.CategoricalAccuracy(),
    ]

loss = tf.keras.losses.CategoricalCrossentropy()


optimizer = tf.keras.optimizers.Adam(
    learning_rate=lr_schedule,
)


model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
In [ ]:
print(f"Run Name: {MODEL_NAME}")
Run Name: autumn-elevator-65
In [ ]:
history = model.fit(x=train_data_gen, validation_data=val_data_gen, validation_steps=(len(val_data) // (BATCH_SIZE*2)), validation_batch_size=BATCH_SIZE, steps_per_epoch=STEPS_PER_EPOCH, batch_size=BATCH_SIZE, epochs=800, callbacks=[checkpoint_cb, WandbMetricsLogger(log_freq=200)])
Epoch 1/800
1306/1306 [==============================] - 290s 220ms/step - loss: 0.7791 - categorical_accuracy: 0.7959 - val_loss: 0.8662 - val_categorical_accuracy: 0.7754
c:\Users\cyril\.conda\envs\tensorflow_gpu\lib\site-packages\keras\utils\generic_utils.py:497: CustomMaskWarning: Custom mask layers require a config and must override get_config. When loading, the custom mask layer must be passed to the custom_objects argument.
  category=CustomMaskWarning)
Epoch 2/800
1306/1306 [==============================] - 303s 232ms/step - loss: 0.7904 - categorical_accuracy: 0.7946 - val_loss: 0.8179 - val_categorical_accuracy: 0.7926
Epoch 3/800
1306/1306 [==============================] - 291s 223ms/step - loss: 0.8018 - categorical_accuracy: 0.7922 - val_loss: 0.7279 - val_categorical_accuracy: 0.8135
Epoch 4/800
1306/1306 [==============================] - 280s 214ms/step - loss: 0.7978 - categorical_accuracy: 0.7938 - val_loss: 0.8199 - val_categorical_accuracy: 0.7853
Epoch 5/800
1306/1306 [==============================] - 271s 207ms/step - loss: 0.7994 - categorical_accuracy: 0.7937 - val_loss: 0.7110 - val_categorical_accuracy: 0.8201
Epoch 6/800
1306/1306 [==============================] - 271s 207ms/step - loss: 0.8015 - categorical_accuracy: 0.7930 - val_loss: 0.7812 - val_categorical_accuracy: 0.7911
Epoch 7/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.7936 - categorical_accuracy: 0.7955 - val_loss: 0.7879 - val_categorical_accuracy: 0.7904
Epoch 8/800
1306/1306 [==============================] - 269s 206ms/step - loss: 0.8039 - categorical_accuracy: 0.7928 - val_loss: 0.9213 - val_categorical_accuracy: 0.7455
Epoch 9/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.7899 - categorical_accuracy: 0.7968 - val_loss: 1.0514 - val_categorical_accuracy: 0.7270
Epoch 10/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.7944 - categorical_accuracy: 0.7957 - val_loss: 0.7695 - val_categorical_accuracy: 0.8024
Epoch 11/800
1306/1306 [==============================] - 270s 206ms/step - loss: 0.7940 - categorical_accuracy: 0.7953 - val_loss: 0.8084 - val_categorical_accuracy: 0.7914
Epoch 12/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.7812 - categorical_accuracy: 0.7985 - val_loss: 0.9263 - val_categorical_accuracy: 0.7583
Epoch 13/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.7809 - categorical_accuracy: 0.7993 - val_loss: 0.7541 - val_categorical_accuracy: 0.8055
Epoch 14/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.7816 - categorical_accuracy: 0.7987 - val_loss: 0.6890 - val_categorical_accuracy: 0.8232
Epoch 15/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.7772 - categorical_accuracy: 0.8003 - val_loss: 0.6933 - val_categorical_accuracy: 0.8252
Epoch 16/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.7763 - categorical_accuracy: 0.7991 - val_loss: 0.7563 - val_categorical_accuracy: 0.8083
Epoch 17/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.7681 - categorical_accuracy: 0.8017 - val_loss: 0.7735 - val_categorical_accuracy: 0.8061
Epoch 18/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.7664 - categorical_accuracy: 0.8030 - val_loss: 0.8357 - val_categorical_accuracy: 0.7769
Epoch 19/800
1306/1306 [==============================] - 269s 206ms/step - loss: 0.7626 - categorical_accuracy: 0.8032 - val_loss: 0.7686 - val_categorical_accuracy: 0.8065
Epoch 20/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.7617 - categorical_accuracy: 0.8033 - val_loss: 0.7168 - val_categorical_accuracy: 0.8154
Epoch 21/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.7574 - categorical_accuracy: 0.8045 - val_loss: 0.6609 - val_categorical_accuracy: 0.8299
Epoch 22/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.7521 - categorical_accuracy: 0.8060 - val_loss: 0.7983 - val_categorical_accuracy: 0.7976
Epoch 23/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.7496 - categorical_accuracy: 0.8065 - val_loss: 0.8999 - val_categorical_accuracy: 0.7536
Epoch 24/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.7539 - categorical_accuracy: 0.8048 - val_loss: 0.8433 - val_categorical_accuracy: 0.7719
Epoch 25/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.7496 - categorical_accuracy: 0.8062 - val_loss: 0.6427 - val_categorical_accuracy: 0.8377
Epoch 26/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.7359 - categorical_accuracy: 0.8098 - val_loss: 0.8077 - val_categorical_accuracy: 0.7941
Epoch 27/800
1306/1306 [==============================] - 270s 206ms/step - loss: 0.7390 - categorical_accuracy: 0.8088 - val_loss: 0.7029 - val_categorical_accuracy: 0.8197
Epoch 28/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.7364 - categorical_accuracy: 0.8089 - val_loss: 0.7797 - val_categorical_accuracy: 0.7964
Epoch 29/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.7296 - categorical_accuracy: 0.8113 - val_loss: 0.6827 - val_categorical_accuracy: 0.8230
Epoch 30/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.7325 - categorical_accuracy: 0.8106 - val_loss: 0.7203 - val_categorical_accuracy: 0.8156
Epoch 31/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.7294 - categorical_accuracy: 0.8115 - val_loss: 0.6887 - val_categorical_accuracy: 0.8226
Epoch 32/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.7202 - categorical_accuracy: 0.8135 - val_loss: 0.7962 - val_categorical_accuracy: 0.7908
Epoch 33/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.7242 - categorical_accuracy: 0.8116 - val_loss: 0.7792 - val_categorical_accuracy: 0.7946
Epoch 34/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.7191 - categorical_accuracy: 0.8133 - val_loss: 0.7233 - val_categorical_accuracy: 0.8153
Epoch 35/800
1306/1306 [==============================] - 270s 206ms/step - loss: 0.7184 - categorical_accuracy: 0.8138 - val_loss: 0.7425 - val_categorical_accuracy: 0.8106
Epoch 36/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.7136 - categorical_accuracy: 0.8147 - val_loss: 0.7753 - val_categorical_accuracy: 0.7981
Epoch 37/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.7074 - categorical_accuracy: 0.8165 - val_loss: 0.7073 - val_categorical_accuracy: 0.8183
Epoch 38/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.7097 - categorical_accuracy: 0.8161 - val_loss: 0.8177 - val_categorical_accuracy: 0.7892
Epoch 39/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.7107 - categorical_accuracy: 0.8152 - val_loss: 0.9898 - val_categorical_accuracy: 0.7532
Epoch 40/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.7002 - categorical_accuracy: 0.8181 - val_loss: 0.7829 - val_categorical_accuracy: 0.7938
Epoch 41/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.7014 - categorical_accuracy: 0.8177 - val_loss: 0.6815 - val_categorical_accuracy: 0.8264
Epoch 42/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.6987 - categorical_accuracy: 0.8185 - val_loss: 0.7018 - val_categorical_accuracy: 0.8232
Epoch 43/800
1306/1306 [==============================] - 270s 206ms/step - loss: 0.6982 - categorical_accuracy: 0.8180 - val_loss: 0.6242 - val_categorical_accuracy: 0.8382
Epoch 44/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.6922 - categorical_accuracy: 0.8194 - val_loss: 0.6766 - val_categorical_accuracy: 0.8248
Epoch 45/800
1306/1306 [==============================] - 270s 206ms/step - loss: 0.6956 - categorical_accuracy: 0.8186 - val_loss: 0.6752 - val_categorical_accuracy: 0.8271
Epoch 46/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.6885 - categorical_accuracy: 0.8207 - val_loss: 0.7535 - val_categorical_accuracy: 0.7875
Epoch 47/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.6870 - categorical_accuracy: 0.8206 - val_loss: 0.6168 - val_categorical_accuracy: 0.8407
Epoch 48/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.6882 - categorical_accuracy: 0.8199 - val_loss: 0.6232 - val_categorical_accuracy: 0.8381
Epoch 49/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.6802 - categorical_accuracy: 0.8222 - val_loss: 0.6281 - val_categorical_accuracy: 0.8389
Epoch 50/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.6825 - categorical_accuracy: 0.8217 - val_loss: 0.8448 - val_categorical_accuracy: 0.7790
Epoch 51/800
1306/1306 [==============================] - 269s 206ms/step - loss: 0.6774 - categorical_accuracy: 0.8221 - val_loss: 0.6615 - val_categorical_accuracy: 0.8240
Epoch 52/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.6737 - categorical_accuracy: 0.8237 - val_loss: 0.8048 - val_categorical_accuracy: 0.7819
Epoch 53/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.6693 - categorical_accuracy: 0.8243 - val_loss: 0.7451 - val_categorical_accuracy: 0.8072
Epoch 54/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.6698 - categorical_accuracy: 0.8246 - val_loss: 0.6602 - val_categorical_accuracy: 0.8296
Epoch 55/800
1306/1306 [==============================] - 287s 220ms/step - loss: 0.6612 - categorical_accuracy: 0.8265 - val_loss: 0.7491 - val_categorical_accuracy: 0.8043
Epoch 56/800
1306/1306 [==============================] - 288s 221ms/step - loss: 0.6701 - categorical_accuracy: 0.8244 - val_loss: 0.6819 - val_categorical_accuracy: 0.8213
Epoch 57/800
1306/1306 [==============================] - 272s 208ms/step - loss: 0.6604 - categorical_accuracy: 0.8266 - val_loss: 0.6616 - val_categorical_accuracy: 0.8288
Epoch 58/800
1306/1306 [==============================] - 271s 208ms/step - loss: 0.6597 - categorical_accuracy: 0.8277 - val_loss: 0.6452 - val_categorical_accuracy: 0.8374
Epoch 59/800
1306/1306 [==============================] - 269s 206ms/step - loss: 0.6564 - categorical_accuracy: 0.8273 - val_loss: 0.5972 - val_categorical_accuracy: 0.8443
Epoch 60/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.6548 - categorical_accuracy: 0.8282 - val_loss: 0.6359 - val_categorical_accuracy: 0.8344
Epoch 61/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.6522 - categorical_accuracy: 0.8287 - val_loss: 0.5966 - val_categorical_accuracy: 0.8407
Epoch 62/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.6451 - categorical_accuracy: 0.8301 - val_loss: 0.6509 - val_categorical_accuracy: 0.8332
Epoch 63/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.6471 - categorical_accuracy: 0.8294 - val_loss: 0.6542 - val_categorical_accuracy: 0.8312
Epoch 64/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.6511 - categorical_accuracy: 0.8287 - val_loss: 0.7799 - val_categorical_accuracy: 0.8023
Epoch 65/800
1306/1306 [==============================] - 270s 206ms/step - loss: 0.6418 - categorical_accuracy: 0.8310 - val_loss: 0.6359 - val_categorical_accuracy: 0.8354
Epoch 66/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.6419 - categorical_accuracy: 0.8312 - val_loss: 0.6502 - val_categorical_accuracy: 0.8302
Epoch 67/800
1306/1306 [==============================] - 270s 206ms/step - loss: 0.6421 - categorical_accuracy: 0.8309 - val_loss: 0.6575 - val_categorical_accuracy: 0.8274
Epoch 68/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.6372 - categorical_accuracy: 0.8316 - val_loss: 0.6943 - val_categorical_accuracy: 0.8207
Epoch 69/800
1306/1306 [==============================] - 271s 207ms/step - loss: 0.6382 - categorical_accuracy: 0.8318 - val_loss: 0.6541 - val_categorical_accuracy: 0.8249
Epoch 70/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.6338 - categorical_accuracy: 0.8326 - val_loss: 0.5886 - val_categorical_accuracy: 0.8464
Epoch 71/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.6346 - categorical_accuracy: 0.8321 - val_loss: 0.6676 - val_categorical_accuracy: 0.8238
Epoch 72/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.6311 - categorical_accuracy: 0.8332 - val_loss: 0.6308 - val_categorical_accuracy: 0.8329
Epoch 73/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.6290 - categorical_accuracy: 0.8338 - val_loss: 0.6783 - val_categorical_accuracy: 0.8150
Epoch 74/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.6258 - categorical_accuracy: 0.8344 - val_loss: 0.7365 - val_categorical_accuracy: 0.8112
Epoch 75/800
1306/1306 [==============================] - 269s 206ms/step - loss: 0.6280 - categorical_accuracy: 0.8337 - val_loss: 0.6059 - val_categorical_accuracy: 0.8385
Epoch 76/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.6191 - categorical_accuracy: 0.8361 - val_loss: 0.6982 - val_categorical_accuracy: 0.8139
Epoch 77/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.6207 - categorical_accuracy: 0.8363 - val_loss: 0.7621 - val_categorical_accuracy: 0.8003
Epoch 78/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.6178 - categorical_accuracy: 0.8364 - val_loss: 0.6813 - val_categorical_accuracy: 0.8156
Epoch 79/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.6181 - categorical_accuracy: 0.8359 - val_loss: 0.6834 - val_categorical_accuracy: 0.8203
Epoch 80/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.6186 - categorical_accuracy: 0.8363 - val_loss: 0.6780 - val_categorical_accuracy: 0.8209
Epoch 81/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.6140 - categorical_accuracy: 0.8370 - val_loss: 0.6229 - val_categorical_accuracy: 0.8366
Epoch 82/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.6097 - categorical_accuracy: 0.8380 - val_loss: 0.6921 - val_categorical_accuracy: 0.8180
Epoch 83/800
1306/1306 [==============================] - 270s 206ms/step - loss: 0.6087 - categorical_accuracy: 0.8384 - val_loss: 1.0125 - val_categorical_accuracy: 0.7157
Epoch 84/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.6098 - categorical_accuracy: 0.8377 - val_loss: 0.5839 - val_categorical_accuracy: 0.8451
Epoch 85/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.6053 - categorical_accuracy: 0.8392 - val_loss: 0.6133 - val_categorical_accuracy: 0.8374
Epoch 86/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.6066 - categorical_accuracy: 0.8383 - val_loss: 0.5833 - val_categorical_accuracy: 0.8462
Epoch 87/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.6027 - categorical_accuracy: 0.8397 - val_loss: 0.6327 - val_categorical_accuracy: 0.8315
Epoch 88/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.6027 - categorical_accuracy: 0.8394 - val_loss: 0.5862 - val_categorical_accuracy: 0.8433
Epoch 89/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.6013 - categorical_accuracy: 0.8396 - val_loss: 0.6901 - val_categorical_accuracy: 0.8168
Epoch 90/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.5977 - categorical_accuracy: 0.8410 - val_loss: 0.6273 - val_categorical_accuracy: 0.8362
Epoch 91/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.5994 - categorical_accuracy: 0.8401 - val_loss: 0.6204 - val_categorical_accuracy: 0.8391
Epoch 92/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.5913 - categorical_accuracy: 0.8424 - val_loss: 0.6612 - val_categorical_accuracy: 0.8225
Epoch 93/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.5928 - categorical_accuracy: 0.8418 - val_loss: 0.6175 - val_categorical_accuracy: 0.8397
Epoch 94/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.5905 - categorical_accuracy: 0.8428 - val_loss: 0.5927 - val_categorical_accuracy: 0.8420
Epoch 95/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.5925 - categorical_accuracy: 0.8418 - val_loss: 0.5982 - val_categorical_accuracy: 0.8388
Epoch 96/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.5878 - categorical_accuracy: 0.8430 - val_loss: 0.6250 - val_categorical_accuracy: 0.8354
Epoch 97/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.5877 - categorical_accuracy: 0.8429 - val_loss: 0.5906 - val_categorical_accuracy: 0.8429
Epoch 98/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.5856 - categorical_accuracy: 0.8434 - val_loss: 0.5811 - val_categorical_accuracy: 0.8459
Epoch 99/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.5835 - categorical_accuracy: 0.8443 - val_loss: 0.8027 - val_categorical_accuracy: 0.7795
Epoch 100/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.5869 - categorical_accuracy: 0.8431 - val_loss: 0.6040 - val_categorical_accuracy: 0.8396
Epoch 101/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.5812 - categorical_accuracy: 0.8443 - val_loss: 0.5834 - val_categorical_accuracy: 0.8485
Epoch 102/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.5779 - categorical_accuracy: 0.8455 - val_loss: 0.5819 - val_categorical_accuracy: 0.8491
Epoch 103/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.5759 - categorical_accuracy: 0.8457 - val_loss: 0.6048 - val_categorical_accuracy: 0.8388
Epoch 104/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.5740 - categorical_accuracy: 0.8469 - val_loss: 0.5966 - val_categorical_accuracy: 0.8431
Epoch 105/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.5714 - categorical_accuracy: 0.8469 - val_loss: 0.5740 - val_categorical_accuracy: 0.8487
Epoch 106/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.5702 - categorical_accuracy: 0.8470 - val_loss: 0.5826 - val_categorical_accuracy: 0.8482
Epoch 107/800
1306/1306 [==============================] - 270s 206ms/step - loss: 0.5688 - categorical_accuracy: 0.8475 - val_loss: 0.5890 - val_categorical_accuracy: 0.8449
Epoch 108/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.5744 - categorical_accuracy: 0.8460 - val_loss: 0.6083 - val_categorical_accuracy: 0.8398
Epoch 109/800
1306/1306 [==============================] - 301s 230ms/step - loss: 0.5677 - categorical_accuracy: 0.8477 - val_loss: 0.6062 - val_categorical_accuracy: 0.8431
Epoch 110/800
1306/1306 [==============================] - 280s 215ms/step - loss: 0.5688 - categorical_accuracy: 0.8472 - val_loss: 0.5889 - val_categorical_accuracy: 0.8460
Epoch 111/800
1306/1306 [==============================] - 274s 209ms/step - loss: 0.5658 - categorical_accuracy: 0.8482 - val_loss: 0.5612 - val_categorical_accuracy: 0.8512
Epoch 112/800
1306/1306 [==============================] - 272s 208ms/step - loss: 0.5637 - categorical_accuracy: 0.8484 - val_loss: 0.5542 - val_categorical_accuracy: 0.8531
Epoch 113/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.5586 - categorical_accuracy: 0.8493 - val_loss: 0.6315 - val_categorical_accuracy: 0.8276
Epoch 114/800
1306/1306 [==============================] - 270s 206ms/step - loss: 0.5627 - categorical_accuracy: 0.8485 - val_loss: 0.5597 - val_categorical_accuracy: 0.8493
Epoch 115/800
1306/1306 [==============================] - 269s 206ms/step - loss: 0.5591 - categorical_accuracy: 0.8492 - val_loss: 0.5682 - val_categorical_accuracy: 0.8466
Epoch 116/800
1306/1306 [==============================] - 270s 206ms/step - loss: 0.5573 - categorical_accuracy: 0.8502 - val_loss: 0.5731 - val_categorical_accuracy: 0.8491
Epoch 117/800
1306/1306 [==============================] - 269s 206ms/step - loss: 0.5585 - categorical_accuracy: 0.8501 - val_loss: 0.5887 - val_categorical_accuracy: 0.8443
Epoch 118/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.5561 - categorical_accuracy: 0.8503 - val_loss: 0.5908 - val_categorical_accuracy: 0.8455
Epoch 119/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.5532 - categorical_accuracy: 0.8507 - val_loss: 0.6436 - val_categorical_accuracy: 0.8298
Epoch 120/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.5537 - categorical_accuracy: 0.8503 - val_loss: 0.5674 - val_categorical_accuracy: 0.8512
Epoch 121/800
1306/1306 [==============================] - 270s 206ms/step - loss: 0.5506 - categorical_accuracy: 0.8512 - val_loss: 0.5639 - val_categorical_accuracy: 0.8538
Epoch 122/800
1306/1306 [==============================] - 270s 206ms/step - loss: 0.5541 - categorical_accuracy: 0.8508 - val_loss: 0.6234 - val_categorical_accuracy: 0.8326
Epoch 123/800
1306/1306 [==============================] - 269s 206ms/step - loss: 0.5500 - categorical_accuracy: 0.8517 - val_loss: 0.5820 - val_categorical_accuracy: 0.8461
Epoch 124/800
1306/1306 [==============================] - 270s 206ms/step - loss: 0.5458 - categorical_accuracy: 0.8526 - val_loss: 0.5392 - val_categorical_accuracy: 0.8572
Epoch 125/800
1306/1306 [==============================] - 270s 206ms/step - loss: 0.5499 - categorical_accuracy: 0.8518 - val_loss: 0.5395 - val_categorical_accuracy: 0.8580
Epoch 126/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.5463 - categorical_accuracy: 0.8522 - val_loss: 0.5306 - val_categorical_accuracy: 0.8595
Epoch 127/800
1306/1306 [==============================] - 270s 206ms/step - loss: 0.5443 - categorical_accuracy: 0.8530 - val_loss: 0.6220 - val_categorical_accuracy: 0.8340
Epoch 128/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.5425 - categorical_accuracy: 0.8530 - val_loss: 0.6693 - val_categorical_accuracy: 0.8200
Epoch 129/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.5424 - categorical_accuracy: 0.8533 - val_loss: 0.6338 - val_categorical_accuracy: 0.8257
Epoch 130/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.5453 - categorical_accuracy: 0.8524 - val_loss: 0.5485 - val_categorical_accuracy: 0.8572
Epoch 131/800
1306/1306 [==============================] - 269s 206ms/step - loss: 0.5395 - categorical_accuracy: 0.8540 - val_loss: 0.6003 - val_categorical_accuracy: 0.8440
Epoch 132/800
1306/1306 [==============================] - 269s 206ms/step - loss: 0.5346 - categorical_accuracy: 0.8554 - val_loss: 0.5545 - val_categorical_accuracy: 0.8492
Epoch 133/800
1306/1306 [==============================] - 270s 206ms/step - loss: 0.5365 - categorical_accuracy: 0.8545 - val_loss: 0.5756 - val_categorical_accuracy: 0.8483
Epoch 134/800
1306/1306 [==============================] - 270s 206ms/step - loss: 0.5379 - categorical_accuracy: 0.8541 - val_loss: 0.5613 - val_categorical_accuracy: 0.8479
Epoch 135/800
1306/1306 [==============================] - 269s 206ms/step - loss: 0.5350 - categorical_accuracy: 0.8552 - val_loss: 0.5434 - val_categorical_accuracy: 0.8567
Epoch 136/800
1306/1306 [==============================] - 270s 206ms/step - loss: 0.5330 - categorical_accuracy: 0.8553 - val_loss: 0.6462 - val_categorical_accuracy: 0.8305
Epoch 137/800
1306/1306 [==============================] - 269s 206ms/step - loss: 0.5326 - categorical_accuracy: 0.8555 - val_loss: 0.5437 - val_categorical_accuracy: 0.8563
Epoch 138/800
1306/1306 [==============================] - 269s 206ms/step - loss: 0.5310 - categorical_accuracy: 0.8561 - val_loss: 0.5552 - val_categorical_accuracy: 0.8527
Epoch 139/800
1306/1306 [==============================] - 269s 206ms/step - loss: 0.5340 - categorical_accuracy: 0.8551 - val_loss: 0.5978 - val_categorical_accuracy: 0.8456
Epoch 140/800
1306/1306 [==============================] - 269s 206ms/step - loss: 0.5339 - categorical_accuracy: 0.8555 - val_loss: 0.6280 - val_categorical_accuracy: 0.8270
Epoch 141/800
1306/1306 [==============================] - 269s 206ms/step - loss: 0.5288 - categorical_accuracy: 0.8565 - val_loss: 0.6266 - val_categorical_accuracy: 0.8326
Epoch 142/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.5256 - categorical_accuracy: 0.8575 - val_loss: 0.5533 - val_categorical_accuracy: 0.8556
Epoch 143/800
1306/1306 [==============================] - 270s 206ms/step - loss: 0.5264 - categorical_accuracy: 0.8570 - val_loss: 0.5663 - val_categorical_accuracy: 0.8465
Epoch 144/800
1306/1306 [==============================] - 270s 206ms/step - loss: 0.5258 - categorical_accuracy: 0.8573 - val_loss: 0.5786 - val_categorical_accuracy: 0.8497
Epoch 145/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.5250 - categorical_accuracy: 0.8571 - val_loss: 0.5827 - val_categorical_accuracy: 0.8453
Epoch 146/800
1306/1306 [==============================] - 269s 206ms/step - loss: 0.5245 - categorical_accuracy: 0.8577 - val_loss: 0.5667 - val_categorical_accuracy: 0.8479
Epoch 147/800
1306/1306 [==============================] - 269s 206ms/step - loss: 0.5260 - categorical_accuracy: 0.8569 - val_loss: 0.5206 - val_categorical_accuracy: 0.8623
Epoch 148/800
1306/1306 [==============================] - 269s 206ms/step - loss: 0.5217 - categorical_accuracy: 0.8582 - val_loss: 0.5647 - val_categorical_accuracy: 0.8488
Epoch 149/800
1306/1306 [==============================] - 269s 206ms/step - loss: 0.5206 - categorical_accuracy: 0.8580 - val_loss: 0.5500 - val_categorical_accuracy: 0.8514
Epoch 150/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.5217 - categorical_accuracy: 0.8579 - val_loss: 0.5320 - val_categorical_accuracy: 0.8544
Epoch 151/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.5185 - categorical_accuracy: 0.8593 - val_loss: 0.5353 - val_categorical_accuracy: 0.8618
Epoch 152/800
1306/1306 [==============================] - 270s 206ms/step - loss: 0.5203 - categorical_accuracy: 0.8582 - val_loss: 0.5504 - val_categorical_accuracy: 0.8561
Epoch 153/800
1306/1306 [==============================] - 270s 206ms/step - loss: 0.5150 - categorical_accuracy: 0.8597 - val_loss: 0.5526 - val_categorical_accuracy: 0.8521
Epoch 154/800
1306/1306 [==============================] - 269s 206ms/step - loss: 0.5171 - categorical_accuracy: 0.8587 - val_loss: 0.5712 - val_categorical_accuracy: 0.8487
Epoch 155/800
1306/1306 [==============================] - 269s 206ms/step - loss: 0.5140 - categorical_accuracy: 0.8601 - val_loss: 0.5504 - val_categorical_accuracy: 0.8544
Epoch 156/800
1306/1306 [==============================] - 269s 206ms/step - loss: 0.5126 - categorical_accuracy: 0.8601 - val_loss: 0.5699 - val_categorical_accuracy: 0.8477
Epoch 157/800
1306/1306 [==============================] - 269s 206ms/step - loss: 0.5140 - categorical_accuracy: 0.8595 - val_loss: 0.5602 - val_categorical_accuracy: 0.8494
Epoch 158/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.5143 - categorical_accuracy: 0.8599 - val_loss: 0.6428 - val_categorical_accuracy: 0.8292
Epoch 159/800
1306/1306 [==============================] - 270s 206ms/step - loss: 0.5087 - categorical_accuracy: 0.8610 - val_loss: 0.5596 - val_categorical_accuracy: 0.8507
Epoch 160/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.5110 - categorical_accuracy: 0.8603 - val_loss: 0.5197 - val_categorical_accuracy: 0.8627
Epoch 161/800
1306/1306 [==============================] - 270s 206ms/step - loss: 0.5094 - categorical_accuracy: 0.8607 - val_loss: 0.5435 - val_categorical_accuracy: 0.8540
Epoch 162/800
1306/1306 [==============================] - 297s 227ms/step - loss: 0.5094 - categorical_accuracy: 0.8611 - val_loss: 0.5379 - val_categorical_accuracy: 0.8580
Epoch 163/800
1306/1306 [==============================] - 283s 217ms/step - loss: 0.5055 - categorical_accuracy: 0.8616 - val_loss: 0.5570 - val_categorical_accuracy: 0.8508
Epoch 164/800
1306/1306 [==============================] - 274s 210ms/step - loss: 0.5098 - categorical_accuracy: 0.8610 - val_loss: 0.5499 - val_categorical_accuracy: 0.8547
Epoch 165/800
1306/1306 [==============================] - 269s 206ms/step - loss: 0.5057 - categorical_accuracy: 0.8623 - val_loss: 0.5465 - val_categorical_accuracy: 0.8543
Epoch 166/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.5056 - categorical_accuracy: 0.8620 - val_loss: 0.6048 - val_categorical_accuracy: 0.8368
Epoch 167/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.5044 - categorical_accuracy: 0.8622 - val_loss: 0.5892 - val_categorical_accuracy: 0.8426
Epoch 168/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.5048 - categorical_accuracy: 0.8620 - val_loss: 0.5803 - val_categorical_accuracy: 0.8450
Epoch 169/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.5016 - categorical_accuracy: 0.8626 - val_loss: 0.6053 - val_categorical_accuracy: 0.8399
Epoch 170/800
1306/1306 [==============================] - 270s 206ms/step - loss: 0.5029 - categorical_accuracy: 0.8625 - val_loss: 0.6313 - val_categorical_accuracy: 0.8310
Epoch 171/800
1306/1306 [==============================] - 269s 206ms/step - loss: 0.5016 - categorical_accuracy: 0.8625 - val_loss: 0.6084 - val_categorical_accuracy: 0.8386
Epoch 172/800
1306/1306 [==============================] - 269s 206ms/step - loss: 0.4998 - categorical_accuracy: 0.8627 - val_loss: 0.5598 - val_categorical_accuracy: 0.8493
Epoch 173/800
1306/1306 [==============================] - 269s 206ms/step - loss: 0.5002 - categorical_accuracy: 0.8624 - val_loss: 0.5308 - val_categorical_accuracy: 0.8597
Epoch 174/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.4989 - categorical_accuracy: 0.8635 - val_loss: 0.6244 - val_categorical_accuracy: 0.8307
Epoch 175/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.4947 - categorical_accuracy: 0.8644 - val_loss: 0.5292 - val_categorical_accuracy: 0.8624
Epoch 176/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.4986 - categorical_accuracy: 0.8634 - val_loss: 0.5264 - val_categorical_accuracy: 0.8610
Epoch 177/800
1306/1306 [==============================] - 270s 206ms/step - loss: 0.4961 - categorical_accuracy: 0.8637 - val_loss: 0.5555 - val_categorical_accuracy: 0.8539
Epoch 178/800
1306/1306 [==============================] - 270s 206ms/step - loss: 0.4934 - categorical_accuracy: 0.8639 - val_loss: 0.5552 - val_categorical_accuracy: 0.8540
Epoch 179/800
1306/1306 [==============================] - 269s 206ms/step - loss: 0.4979 - categorical_accuracy: 0.8635 - val_loss: 0.5164 - val_categorical_accuracy: 0.8608
Epoch 180/800
1306/1306 [==============================] - 270s 206ms/step - loss: 0.4934 - categorical_accuracy: 0.8647 - val_loss: 0.5488 - val_categorical_accuracy: 0.8549
Epoch 181/800
1306/1306 [==============================] - 269s 206ms/step - loss: 0.4933 - categorical_accuracy: 0.8644 - val_loss: 0.5304 - val_categorical_accuracy: 0.8598
Epoch 182/800
1306/1306 [==============================] - 270s 207ms/step - loss: 0.4964 - categorical_accuracy: 0.8637 - val_loss: 0.5192 - val_categorical_accuracy: 0.8637
Epoch 183/800
 459/1306 [=========>....................] - ETA: 2:53 - loss: 0.4873 - categorical_accuracy: 0.8665
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
~\AppData\Local\Temp\ipykernel_17364\1931218566.py in <module>
----> 1 history = model.fit(x=train_data_gen, validation_data=val_data_gen, validation_steps=(len(val_data) // (BATCH_SIZE*2)), validation_batch_size=BATCH_SIZE, steps_per_epoch=STEPS_PER_EPOCH, batch_size=BATCH_SIZE, epochs=800, callbacks=[checkpoint_cb, WandbMetricsLogger(log_freq=200)])

c:\Users\cyril\.conda\envs\tensorflow_gpu\lib\site-packages\wandb\integration\keras\keras.py in new_v2(*args, **kwargs)
    172             for cbk in cbks:
    173                 set_wandb_attrs(cbk, val_data)
--> 174         return old_v2(*args, **kwargs)
    175 
    176     training_arrays.orig_fit_loop = old_arrays

c:\Users\cyril\.conda\envs\tensorflow_gpu\lib\site-packages\wandb\integration\keras\keras.py in new_v2(*args, **kwargs)
    172             for cbk in cbks:
    173                 set_wandb_attrs(cbk, val_data)
--> 174         return old_v2(*args, **kwargs)
    175 
    176     training_arrays.orig_fit_loop = old_arrays

c:\Users\cyril\.conda\envs\tensorflow_gpu\lib\site-packages\wandb\integration\keras\keras.py in new_v2(*args, **kwargs)
    172             for cbk in cbks:
    173                 set_wandb_attrs(cbk, val_data)
--> 174         return old_v2(*args, **kwargs)
    175 
    176     training_arrays.orig_fit_loop = old_arrays

c:\Users\cyril\.conda\envs\tensorflow_gpu\lib\site-packages\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
   1187               logs = tmp_logs  # No error, now safe to assign to logs.
   1188               end_step = step + data_handler.step_increment
-> 1189               callbacks.on_train_batch_end(end_step, logs)
   1190               if self.stop_training:
   1191                 break

c:\Users\cyril\.conda\envs\tensorflow_gpu\lib\site-packages\keras\callbacks.py in on_train_batch_end(self, batch, logs)
    433     """
    434     if self._should_call_train_batch_hooks:
--> 435       self._call_batch_hook(ModeKeys.TRAIN, 'end', batch, logs=logs)
    436 
    437   def on_test_batch_begin(self, batch, logs=None):

c:\Users\cyril\.conda\envs\tensorflow_gpu\lib\site-packages\keras\callbacks.py in _call_batch_hook(self, mode, hook, batch, logs)
    293       self._call_batch_begin_hook(mode, batch, logs)
    294     elif hook == 'end':
--> 295       self._call_batch_end_hook(mode, batch, logs)
    296     else:
    297       raise ValueError('Unrecognized hook: {}'.format(hook))

c:\Users\cyril\.conda\envs\tensorflow_gpu\lib\site-packages\keras\callbacks.py in _call_batch_end_hook(self, mode, batch, logs)
    313       self._batch_times.append(batch_time)
    314 
--> 315     self._call_batch_hook_helper(hook_name, batch, logs)
    316 
    317     if len(self._batch_times) >= self._num_batches_for_timing_check:

c:\Users\cyril\.conda\envs\tensorflow_gpu\lib\site-packages\keras\callbacks.py in _call_batch_hook_helper(self, hook_name, batch, logs)
    348       start_time = time.time()
    349 
--> 350     logs = self._process_logs(logs, is_batch_hook=True)
    351     for callback in self.callbacks:
    352       hook = getattr(callback, hook_name)

c:\Users\cyril\.conda\envs\tensorflow_gpu\lib\site-packages\keras\callbacks.py in _process_logs(self, logs, is_batch_hook)
    268     if is_batch_hook and self._batch_hooks_support_tf_logs:
    269       return logs
--> 270     return tf_utils.sync_to_numpy_or_python_type(logs)
    271 
    272   def append(self, callback):

c:\Users\cyril\.conda\envs\tensorflow_gpu\lib\site-packages\keras\utils\tf_utils.py in sync_to_numpy_or_python_type(tensors)
    514     return t  # Don't turn ragged or sparse tensors to NumPy.
    515 
--> 516   return tf.nest.map_structure(_to_single_numpy_or_python_type, tensors)
    517 
    518 

c:\Users\cyril\.conda\envs\tensorflow_gpu\lib\site-packages\tensorflow\python\util\nest.py in map_structure(func, *structure, **kwargs)
    867 
    868   return pack_sequence_as(
--> 869       structure[0], [func(*x) for x in entries],
    870       expand_composites=expand_composites)
    871 

c:\Users\cyril\.conda\envs\tensorflow_gpu\lib\site-packages\tensorflow\python\util\nest.py in <listcomp>(.0)
    867 
    868   return pack_sequence_as(
--> 869       structure[0], [func(*x) for x in entries],
    870       expand_composites=expand_composites)
    871 

c:\Users\cyril\.conda\envs\tensorflow_gpu\lib\site-packages\keras\utils\tf_utils.py in _to_single_numpy_or_python_type(t)
    510   def _to_single_numpy_or_python_type(t):
    511     if isinstance(t, tf.Tensor):
--> 512       x = t.numpy()
    513       return x.item() if np.ndim(x) == 0 else x
    514     return t  # Don't turn ragged or sparse tensors to NumPy.

c:\Users\cyril\.conda\envs\tensorflow_gpu\lib\site-packages\tensorflow\python\framework\ops.py in numpy(self)
   1092     """
   1093     # TODO(slebedev): Consider avoiding a copy for non-CPU or remote tensors.
-> 1094     maybe_arr = self._numpy()  # pylint: disable=protected-access
   1095     return maybe_arr.copy() if isinstance(maybe_arr, np.ndarray) else maybe_arr
   1096 

c:\Users\cyril\.conda\envs\tensorflow_gpu\lib\site-packages\tensorflow\python\framework\ops.py in _numpy(self)
   1058   def _numpy(self):
   1059     try:
-> 1060       return self._numpy_internal()
   1061     except core._NotOkStatusException as e:  # pylint: disable=protected-access
   1062       six.raise_from(core._status_to_exception(e.code, e.message), None)  # pylint: disable=protected-access

KeyboardInterrupt: 
In [ ]:
 
In [ ]:
 

Calculate Performance¶

In [ ]:
model = tf.keras.models.load_model(f"training/autumn-elevator-65/checkpoints/epoch_179_val_loss_0.5163795351982117.keras", compile=False)
In [ ]:
model.summary()
Model: "model"
__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to                     
==================================================================================================
input_1 (InputLayer)            [(None, 256, 256, 3) 0                                            
__________________________________________________________________________________________________
conv2d (Conv2D)                 (None, 256, 256, 16) 448         input_1[0][0]                    
__________________________________________________________________________________________________
batch_normalization (BatchNorma (None, 256, 256, 16) 64          conv2d[0][0]                     
__________________________________________________________________________________________________
activation (Activation)         (None, 256, 256, 16) 0           batch_normalization[0][0]        
__________________________________________________________________________________________________
dropout (Dropout)               (None, 256, 256, 16) 0           activation[0][0]                 
__________________________________________________________________________________________________
conv2d_1 (Conv2D)               (None, 256, 256, 16) 2320        dropout[0][0]                    
__________________________________________________________________________________________________
batch_normalization_1 (BatchNor (None, 256, 256, 16) 64          conv2d_1[0][0]                   
__________________________________________________________________________________________________
activation_1 (Activation)       (None, 256, 256, 16) 0           batch_normalization_1[0][0]      
__________________________________________________________________________________________________
dropout_1 (Dropout)             (None, 256, 256, 16) 0           activation_1[0][0]               
__________________________________________________________________________________________________
conv2d_2 (Conv2D)               (None, 256, 256, 16) 2320        dropout_1[0][0]                  
__________________________________________________________________________________________________
batch_normalization_2 (BatchNor (None, 256, 256, 16) 64          conv2d_2[0][0]                   
__________________________________________________________________________________________________
activation_2 (Activation)       (None, 256, 256, 16) 0           batch_normalization_2[0][0]      
__________________________________________________________________________________________________
dropout_2 (Dropout)             (None, 256, 256, 16) 0           activation_2[0][0]               
__________________________________________________________________________________________________
add (Add)                       (None, 256, 256, 16) 0           dropout_2[0][0]                  
                                                                 conv2d[0][0]                     
__________________________________________________________________________________________________
max_pooling2d (MaxPooling2D)    (None, 128, 128, 16) 0           add[0][0]                        
__________________________________________________________________________________________________
conv2d_3 (Conv2D)               (None, 128, 128, 32) 4640        max_pooling2d[0][0]              
__________________________________________________________________________________________________
batch_normalization_3 (BatchNor (None, 128, 128, 32) 128         conv2d_3[0][0]                   
__________________________________________________________________________________________________
activation_3 (Activation)       (None, 128, 128, 32) 0           batch_normalization_3[0][0]      
__________________________________________________________________________________________________
dropout_3 (Dropout)             (None, 128, 128, 32) 0           activation_3[0][0]               
__________________________________________________________________________________________________
conv2d_4 (Conv2D)               (None, 128, 128, 32) 9248        dropout_3[0][0]                  
__________________________________________________________________________________________________
batch_normalization_4 (BatchNor (None, 128, 128, 32) 128         conv2d_4[0][0]                   
__________________________________________________________________________________________________
activation_4 (Activation)       (None, 128, 128, 32) 0           batch_normalization_4[0][0]      
__________________________________________________________________________________________________
dropout_4 (Dropout)             (None, 128, 128, 32) 0           activation_4[0][0]               
__________________________________________________________________________________________________
conv2d_5 (Conv2D)               (None, 128, 128, 32) 9248        dropout_4[0][0]                  
__________________________________________________________________________________________________
batch_normalization_5 (BatchNor (None, 128, 128, 32) 128         conv2d_5[0][0]                   
__________________________________________________________________________________________________
activation_5 (Activation)       (None, 128, 128, 32) 0           batch_normalization_5[0][0]      
__________________________________________________________________________________________________
dropout_5 (Dropout)             (None, 128, 128, 32) 0           activation_5[0][0]               
__________________________________________________________________________________________________
add_1 (Add)                     (None, 128, 128, 32) 0           dropout_5[0][0]                  
                                                                 conv2d_3[0][0]                   
__________________________________________________________________________________________________
max_pooling2d_1 (MaxPooling2D)  (None, 64, 64, 32)   0           add_1[0][0]                      
__________________________________________________________________________________________________
conv2d_6 (Conv2D)               (None, 64, 64, 64)   18496       max_pooling2d_1[0][0]            
__________________________________________________________________________________________________
batch_normalization_6 (BatchNor (None, 64, 64, 64)   256         conv2d_6[0][0]                   
__________________________________________________________________________________________________
activation_6 (Activation)       (None, 64, 64, 64)   0           batch_normalization_6[0][0]      
__________________________________________________________________________________________________
dropout_6 (Dropout)             (None, 64, 64, 64)   0           activation_6[0][0]               
__________________________________________________________________________________________________
conv2d_7 (Conv2D)               (None, 64, 64, 64)   36928       dropout_6[0][0]                  
__________________________________________________________________________________________________
batch_normalization_7 (BatchNor (None, 64, 64, 64)   256         conv2d_7[0][0]                   
__________________________________________________________________________________________________
activation_7 (Activation)       (None, 64, 64, 64)   0           batch_normalization_7[0][0]      
__________________________________________________________________________________________________
dropout_7 (Dropout)             (None, 64, 64, 64)   0           activation_7[0][0]               
__________________________________________________________________________________________________
conv2d_8 (Conv2D)               (None, 64, 64, 64)   36928       dropout_7[0][0]                  
__________________________________________________________________________________________________
batch_normalization_8 (BatchNor (None, 64, 64, 64)   256         conv2d_8[0][0]                   
__________________________________________________________________________________________________
activation_8 (Activation)       (None, 64, 64, 64)   0           batch_normalization_8[0][0]      
__________________________________________________________________________________________________
dropout_8 (Dropout)             (None, 64, 64, 64)   0           activation_8[0][0]               
__________________________________________________________________________________________________
add_2 (Add)                     (None, 64, 64, 64)   0           dropout_8[0][0]                  
                                                                 conv2d_6[0][0]                   
__________________________________________________________________________________________________
max_pooling2d_2 (MaxPooling2D)  (None, 32, 32, 64)   0           add_2[0][0]                      
__________________________________________________________________________________________________
conv2d_9 (Conv2D)               (None, 32, 32, 128)  73856       max_pooling2d_2[0][0]            
__________________________________________________________________________________________________
batch_normalization_9 (BatchNor (None, 32, 32, 128)  512         conv2d_9[0][0]                   
__________________________________________________________________________________________________
activation_9 (Activation)       (None, 32, 32, 128)  0           batch_normalization_9[0][0]      
__________________________________________________________________________________________________
dropout_9 (Dropout)             (None, 32, 32, 128)  0           activation_9[0][0]               
__________________________________________________________________________________________________
conv2d_10 (Conv2D)              (None, 32, 32, 128)  147584      dropout_9[0][0]                  
__________________________________________________________________________________________________
batch_normalization_10 (BatchNo (None, 32, 32, 128)  512         conv2d_10[0][0]                  
__________________________________________________________________________________________________
activation_10 (Activation)      (None, 32, 32, 128)  0           batch_normalization_10[0][0]     
__________________________________________________________________________________________________
dropout_10 (Dropout)            (None, 32, 32, 128)  0           activation_10[0][0]              
__________________________________________________________________________________________________
conv2d_11 (Conv2D)              (None, 32, 32, 128)  147584      dropout_10[0][0]                 
__________________________________________________________________________________________________
batch_normalization_11 (BatchNo (None, 32, 32, 128)  512         conv2d_11[0][0]                  
__________________________________________________________________________________________________
activation_11 (Activation)      (None, 32, 32, 128)  0           batch_normalization_11[0][0]     
__________________________________________________________________________________________________
dropout_11 (Dropout)            (None, 32, 32, 128)  0           activation_11[0][0]              
__________________________________________________________________________________________________
add_3 (Add)                     (None, 32, 32, 128)  0           dropout_11[0][0]                 
                                                                 conv2d_9[0][0]                   
__________________________________________________________________________________________________
max_pooling2d_3 (MaxPooling2D)  (None, 16, 16, 128)  0           add_3[0][0]                      
__________________________________________________________________________________________________
conv2d_12 (Conv2D)              (None, 16, 16, 256)  295168      max_pooling2d_3[0][0]            
__________________________________________________________________________________________________
batch_normalization_12 (BatchNo (None, 16, 16, 256)  1024        conv2d_12[0][0]                  
__________________________________________________________________________________________________
activation_12 (Activation)      (None, 16, 16, 256)  0           batch_normalization_12[0][0]     
__________________________________________________________________________________________________
dropout_12 (Dropout)            (None, 16, 16, 256)  0           activation_12[0][0]              
__________________________________________________________________________________________________
conv2d_13 (Conv2D)              (None, 16, 16, 256)  590080      dropout_12[0][0]                 
__________________________________________________________________________________________________
batch_normalization_13 (BatchNo (None, 16, 16, 256)  1024        conv2d_13[0][0]                  
__________________________________________________________________________________________________
activation_13 (Activation)      (None, 16, 16, 256)  0           batch_normalization_13[0][0]     
__________________________________________________________________________________________________
dropout_13 (Dropout)            (None, 16, 16, 256)  0           activation_13[0][0]              
__________________________________________________________________________________________________
conv2d_14 (Conv2D)              (None, 16, 16, 256)  590080      dropout_13[0][0]                 
__________________________________________________________________________________________________
batch_normalization_14 (BatchNo (None, 16, 16, 256)  1024        conv2d_14[0][0]                  
__________________________________________________________________________________________________
activation_14 (Activation)      (None, 16, 16, 256)  0           batch_normalization_14[0][0]     
__________________________________________________________________________________________________
dropout_14 (Dropout)            (None, 16, 16, 256)  0           activation_14[0][0]              
__________________________________________________________________________________________________
add_4 (Add)                     (None, 16, 16, 256)  0           dropout_14[0][0]                 
                                                                 conv2d_12[0][0]                  
__________________________________________________________________________________________________
max_pooling2d_4 (MaxPooling2D)  (None, 8, 8, 256)    0           add_4[0][0]                      
__________________________________________________________________________________________________
conv2d_15 (Conv2D)              (None, 8, 8, 512)    131584      max_pooling2d_4[0][0]            
__________________________________________________________________________________________________
batch_normalization_15 (BatchNo (None, 8, 8, 512)    2048        conv2d_15[0][0]                  
__________________________________________________________________________________________________
activation_15 (Activation)      (None, 8, 8, 512)    0           batch_normalization_15[0][0]     
__________________________________________________________________________________________________
dropout_15 (Dropout)            (None, 8, 8, 512)    0           activation_15[0][0]              
__________________________________________________________________________________________________
conv2d_16 (Conv2D)              (None, 8, 8, 512)    262656      dropout_15[0][0]                 
__________________________________________________________________________________________________
batch_normalization_16 (BatchNo (None, 8, 8, 512)    2048        conv2d_16[0][0]                  
__________________________________________________________________________________________________
activation_16 (Activation)      (None, 8, 8, 512)    0           batch_normalization_16[0][0]     
__________________________________________________________________________________________________
dropout_16 (Dropout)            (None, 8, 8, 512)    0           activation_16[0][0]              
__________________________________________________________________________________________________
conv2d_17 (Conv2D)              (None, 8, 8, 512)    262656      dropout_16[0][0]                 
__________________________________________________________________________________________________
batch_normalization_17 (BatchNo (None, 8, 8, 512)    2048        conv2d_17[0][0]                  
__________________________________________________________________________________________________
activation_17 (Activation)      (None, 8, 8, 512)    0           batch_normalization_17[0][0]     
__________________________________________________________________________________________________
add_5 (Add)                     (None, 8, 8, 512)    0           activation_17[0][0]              
                                                                 conv2d_15[0][0]                  
__________________________________________________________________________________________________
dropout_17 (Dropout)            (None, 8, 8, 512)    0           add_5[0][0]                      
__________________________________________________________________________________________________
conv2d_transpose (Conv2DTranspo (None, 16, 16, 512)  1049088     dropout_17[0][0]                 
__________________________________________________________________________________________________
concatenate (Concatenate)       (None, 16, 16, 768)  0           conv2d_transpose[0][0]           
                                                                 add_4[0][0]                      
__________________________________________________________________________________________________
conv2d_18 (Conv2D)              (None, 16, 16, 512)  3539456     concatenate[0][0]                
__________________________________________________________________________________________________
batch_normalization_18 (BatchNo (None, 16, 16, 512)  2048        conv2d_18[0][0]                  
__________________________________________________________________________________________________
activation_18 (Activation)      (None, 16, 16, 512)  0           batch_normalization_18[0][0]     
__________________________________________________________________________________________________
dropout_18 (Dropout)            (None, 16, 16, 512)  0           activation_18[0][0]              
__________________________________________________________________________________________________
conv2d_19 (Conv2D)              (None, 16, 16, 512)  2359808     dropout_18[0][0]                 
__________________________________________________________________________________________________
batch_normalization_19 (BatchNo (None, 16, 16, 512)  2048        conv2d_19[0][0]                  
__________________________________________________________________________________________________
activation_19 (Activation)      (None, 16, 16, 512)  0           batch_normalization_19[0][0]     
__________________________________________________________________________________________________
dropout_19 (Dropout)            (None, 16, 16, 512)  0           activation_19[0][0]              
__________________________________________________________________________________________________
conv2d_20 (Conv2D)              (None, 16, 16, 512)  2359808     dropout_19[0][0]                 
__________________________________________________________________________________________________
batch_normalization_20 (BatchNo (None, 16, 16, 512)  2048        conv2d_20[0][0]                  
__________________________________________________________________________________________________
activation_20 (Activation)      (None, 16, 16, 512)  0           batch_normalization_20[0][0]     
__________________________________________________________________________________________________
add_6 (Add)                     (None, 16, 16, 512)  0           activation_20[0][0]              
                                                                 conv2d_18[0][0]                  
__________________________________________________________________________________________________
dropout_20 (Dropout)            (None, 16, 16, 512)  0           add_6[0][0]                      
__________________________________________________________________________________________________
conv2d_transpose_1 (Conv2DTrans (None, 32, 32, 256)  524544      dropout_20[0][0]                 
__________________________________________________________________________________________________
concatenate_1 (Concatenate)     (None, 32, 32, 384)  0           conv2d_transpose_1[0][0]         
                                                                 add_3[0][0]                      
__________________________________________________________________________________________________
conv2d_21 (Conv2D)              (None, 32, 32, 256)  884992      concatenate_1[0][0]              
__________________________________________________________________________________________________
batch_normalization_21 (BatchNo (None, 32, 32, 256)  1024        conv2d_21[0][0]                  
__________________________________________________________________________________________________
activation_21 (Activation)      (None, 32, 32, 256)  0           batch_normalization_21[0][0]     
__________________________________________________________________________________________________
dropout_21 (Dropout)            (None, 32, 32, 256)  0           activation_21[0][0]              
__________________________________________________________________________________________________
conv2d_22 (Conv2D)              (None, 32, 32, 256)  590080      dropout_21[0][0]                 
__________________________________________________________________________________________________
batch_normalization_22 (BatchNo (None, 32, 32, 256)  1024        conv2d_22[0][0]                  
__________________________________________________________________________________________________
activation_22 (Activation)      (None, 32, 32, 256)  0           batch_normalization_22[0][0]     
__________________________________________________________________________________________________
dropout_22 (Dropout)            (None, 32, 32, 256)  0           activation_22[0][0]              
__________________________________________________________________________________________________
conv2d_23 (Conv2D)              (None, 32, 32, 256)  590080      dropout_22[0][0]                 
__________________________________________________________________________________________________
batch_normalization_23 (BatchNo (None, 32, 32, 256)  1024        conv2d_23[0][0]                  
__________________________________________________________________________________________________
activation_23 (Activation)      (None, 32, 32, 256)  0           batch_normalization_23[0][0]     
__________________________________________________________________________________________________
add_7 (Add)                     (None, 32, 32, 256)  0           activation_23[0][0]              
                                                                 conv2d_21[0][0]                  
__________________________________________________________________________________________________
dropout_23 (Dropout)            (None, 32, 32, 256)  0           add_7[0][0]                      
__________________________________________________________________________________________________
conv2d_transpose_2 (Conv2DTrans (None, 64, 64, 64)   65600       dropout_23[0][0]                 
__________________________________________________________________________________________________
concatenate_2 (Concatenate)     (None, 64, 64, 128)  0           conv2d_transpose_2[0][0]         
                                                                 add_2[0][0]                      
__________________________________________________________________________________________________
conv2d_24 (Conv2D)              (None, 64, 64, 64)   73792       concatenate_2[0][0]              
__________________________________________________________________________________________________
batch_normalization_24 (BatchNo (None, 64, 64, 64)   256         conv2d_24[0][0]                  
__________________________________________________________________________________________________
activation_24 (Activation)      (None, 64, 64, 64)   0           batch_normalization_24[0][0]     
__________________________________________________________________________________________________
dropout_24 (Dropout)            (None, 64, 64, 64)   0           activation_24[0][0]              
__________________________________________________________________________________________________
conv2d_25 (Conv2D)              (None, 64, 64, 64)   36928       dropout_24[0][0]                 
__________________________________________________________________________________________________
batch_normalization_25 (BatchNo (None, 64, 64, 64)   256         conv2d_25[0][0]                  
__________________________________________________________________________________________________
activation_25 (Activation)      (None, 64, 64, 64)   0           batch_normalization_25[0][0]     
__________________________________________________________________________________________________
dropout_25 (Dropout)            (None, 64, 64, 64)   0           activation_25[0][0]              
__________________________________________________________________________________________________
conv2d_26 (Conv2D)              (None, 64, 64, 64)   36928       dropout_25[0][0]                 
__________________________________________________________________________________________________
batch_normalization_26 (BatchNo (None, 64, 64, 64)   256         conv2d_26[0][0]                  
__________________________________________________________________________________________________
activation_26 (Activation)      (None, 64, 64, 64)   0           batch_normalization_26[0][0]     
__________________________________________________________________________________________________
add_8 (Add)                     (None, 64, 64, 64)   0           activation_26[0][0]              
                                                                 conv2d_24[0][0]                  
__________________________________________________________________________________________________
dropout_26 (Dropout)            (None, 64, 64, 64)   0           add_8[0][0]                      
__________________________________________________________________________________________________
conv2d_transpose_3 (Conv2DTrans (None, 128, 128, 32) 8224        dropout_26[0][0]                 
__________________________________________________________________________________________________
concatenate_3 (Concatenate)     (None, 128, 128, 64) 0           conv2d_transpose_3[0][0]         
                                                                 add_1[0][0]                      
__________________________________________________________________________________________________
conv2d_27 (Conv2D)              (None, 128, 128, 32) 18464       concatenate_3[0][0]              
__________________________________________________________________________________________________
batch_normalization_27 (BatchNo (None, 128, 128, 32) 128         conv2d_27[0][0]                  
__________________________________________________________________________________________________
activation_27 (Activation)      (None, 128, 128, 32) 0           batch_normalization_27[0][0]     
__________________________________________________________________________________________________
dropout_27 (Dropout)            (None, 128, 128, 32) 0           activation_27[0][0]              
__________________________________________________________________________________________________
conv2d_28 (Conv2D)              (None, 128, 128, 32) 9248        dropout_27[0][0]                 
__________________________________________________________________________________________________
batch_normalization_28 (BatchNo (None, 128, 128, 32) 128         conv2d_28[0][0]                  
__________________________________________________________________________________________________
activation_28 (Activation)      (None, 128, 128, 32) 0           batch_normalization_28[0][0]     
__________________________________________________________________________________________________
dropout_28 (Dropout)            (None, 128, 128, 32) 0           activation_28[0][0]              
__________________________________________________________________________________________________
conv2d_29 (Conv2D)              (None, 128, 128, 32) 9248        dropout_28[0][0]                 
__________________________________________________________________________________________________
batch_normalization_29 (BatchNo (None, 128, 128, 32) 128         conv2d_29[0][0]                  
__________________________________________________________________________________________________
activation_29 (Activation)      (None, 128, 128, 32) 0           batch_normalization_29[0][0]     
__________________________________________________________________________________________________
add_9 (Add)                     (None, 128, 128, 32) 0           activation_29[0][0]              
                                                                 conv2d_27[0][0]                  
__________________________________________________________________________________________________
dropout_29 (Dropout)            (None, 128, 128, 32) 0           add_9[0][0]                      
__________________________________________________________________________________________________
conv2d_transpose_4 (Conv2DTrans (None, 256, 256, 16) 2064        dropout_29[0][0]                 
__________________________________________________________________________________________________
concatenate_4 (Concatenate)     (None, 256, 256, 32) 0           conv2d_transpose_4[0][0]         
                                                                 add[0][0]                        
__________________________________________________________________________________________________
conv2d_30 (Conv2D)              (None, 256, 256, 32) 9248        concatenate_4[0][0]              
__________________________________________________________________________________________________
batch_normalization_30 (BatchNo (None, 256, 256, 32) 128         conv2d_30[0][0]                  
__________________________________________________________________________________________________
activation_30 (Activation)      (None, 256, 256, 32) 0           batch_normalization_30[0][0]     
__________________________________________________________________________________________________
dropout_30 (Dropout)            (None, 256, 256, 32) 0           activation_30[0][0]              
__________________________________________________________________________________________________
conv2d_31 (Conv2D)              (None, 256, 256, 32) 9248        dropout_30[0][0]                 
__________________________________________________________________________________________________
batch_normalization_31 (BatchNo (None, 256, 256, 32) 128         conv2d_31[0][0]                  
__________________________________________________________________________________________________
activation_31 (Activation)      (None, 256, 256, 32) 0           batch_normalization_31[0][0]     
__________________________________________________________________________________________________
dropout_31 (Dropout)            (None, 256, 256, 32) 0           activation_31[0][0]              
__________________________________________________________________________________________________
conv2d_32 (Conv2D)              (None, 256, 256, 32) 9248        dropout_31[0][0]                 
__________________________________________________________________________________________________
batch_normalization_32 (BatchNo (None, 256, 256, 32) 128         conv2d_32[0][0]                  
__________________________________________________________________________________________________
activation_32 (Activation)      (None, 256, 256, 32) 0           batch_normalization_32[0][0]     
__________________________________________________________________________________________________
add_10 (Add)                    (None, 256, 256, 32) 0           activation_32[0][0]              
                                                                 conv2d_30[0][0]                  
__________________________________________________________________________________________________
dropout_32 (Dropout)            (None, 256, 256, 32) 0           add_10[0][0]                     
__________________________________________________________________________________________________
conv2d_33 (Conv2D)              (None, 256, 256, 34) 1122        dropout_32[0][0]                 
==================================================================================================
Total params: 14,831,890
Trainable params: 14,820,466
Non-trainable params: 11,424
__________________________________________________________________________________________________
In [ ]:
iou_score = sm.metrics.IOUScore()

f_score = sm.metrics.FScore()

Train data¶

In [ ]:
train_data = SegmentationDataGenerator(
    image_directory = f"{DATA_FOLDER}/train/train2/*_img_*",
    segmentation_directory = f"{DATA_FOLDER}/train/train2/*_lbl_*",
    num_classes = NUM_CLASSES,
    batch_size = BATCH_SIZE * 10,
    augmentation = False
)
train_data_gen = train_data.get_data_generator()
Indexing Image files...
Indexing Segmentation files...
Loaded 23520 images with 23520 segmentations
In [ ]:
import pandas as pd
from tqdm import tqdm

class_index = 1

IoU_df = pd.DataFrame()
FScore_df = pd.DataFrame()
Accuracy_df = pd.DataFrame()

predicted_fscores = []
predicted_ious = []
predicted_accuracies = []

for _ in tqdm(range(len(train_data) // (BATCH_SIZE * 10))):

    x, ground_truth = train_data_gen.__next__()
    
    predicted = model.predict(x)
    #predicted = ground_truth
    

    # ----- calc IoU -----
    iou_predicted = iou_score.__call__(ground_truth, predicted)
    predicted_ious.append(iou_predicted.numpy())

    # ----- calc FScore -----
    fscore_predicted = f_score.__call__(ground_truth, predicted)
    predicted_fscores.append(fscore_predicted.numpy())

    # ----- calc Accuracy -----
    m = tf.keras.metrics.CategoricalAccuracy()
    m.update_state(ground_truth, predicted)
    predicted_accuracies.append(m.result().numpy())
    
FScore_df[f'FScore'] = predicted_fscores
IoU_df[f'IoU'] = predicted_ious
Accuracy_df[f'Accuracy'] = predicted_accuracies

    
# add the two score dataframes horizontally
metrics_df = pd.concat([FScore_df, IoU_df, Accuracy_df], axis=1)
100%|██████████| 130/130 [06:42<00:00,  3.09s/it]
In [ ]:
metrics_df.describe()
Out[ ]:
FScore IoU Accuracy
count 130.000000 130.000000 130.000000
mean 0.414264 0.332495 0.897913
std 0.021827 0.017215 0.009678
min 0.353962 0.287643 0.874729
25% 0.399114 0.321257 0.891209
50% 0.414115 0.331224 0.897991
75% 0.428073 0.342137 0.904406
max 0.495504 0.396390 0.919362

Val data¶

In [ ]:
val_data = SegmentationDataGenerator(
    image_directory = f"{DATA_FOLDER}/val/*_img_*",
    segmentation_directory = f"{DATA_FOLDER}/val/*_lbl_*",
    num_classes = NUM_CLASSES,
    batch_size = BATCH_SIZE * 10,
    augmentation = False
)
val_data_gen = val_data.get_data_generator()
Indexing Image files...
Indexing Segmentation files...
Loaded 8544 images with 8544 segmentations
In [ ]:
import pandas as pd
from tqdm import tqdm

class_index = 1

IoU_df = pd.DataFrame()
FScore_df = pd.DataFrame()
Accuracy_df = pd.DataFrame()

predicted_fscores = []
predicted_ious = []
predicted_accuracies = []

for _ in tqdm(range(len(val_data) // (BATCH_SIZE * 10))):

    x, ground_truth = val_data_gen.__next__()
    
    predicted = model.predict(x)
    #predicted = ground_truth
    

    # ----- calc IoU -----
    iou_predicted = iou_score.__call__(ground_truth, predicted)
    predicted_ious.append(iou_predicted.numpy())

    # ----- calc FScore -----
    fscore_predicted = f_score.__call__(ground_truth, predicted)
    predicted_fscores.append(fscore_predicted.numpy())

    # ----- calc Accuracy -----
    m = tf.keras.metrics.CategoricalAccuracy()
    m.update_state(ground_truth, predicted)
    predicted_accuracies.append(m.result().numpy())
    
FScore_df[f'FScore'] = predicted_fscores
IoU_df[f'IoU'] = predicted_ious
Accuracy_df[f'Accuracy'] = predicted_accuracies

    
# add the two score dataframes horizontally
metrics_df = pd.concat([FScore_df, IoU_df, Accuracy_df], axis=1)
100%|██████████| 47/47 [02:26<00:00,  3.12s/it]
In [ ]:
metrics_df.describe()
Out[ ]:
FScore IoU Accuracy
count 47.000000 47.000000 47.000000
mean 0.330489 0.263915 0.859529
std 0.011199 0.009396 0.013740
min 0.301107 0.242201 0.828114
25% 0.323073 0.258740 0.848265
50% 0.330723 0.264539 0.860878
75% 0.339054 0.271185 0.870674
max 0.348996 0.282250 0.883893

Test data¶

In [ ]:
test_data = SegmentationDataGenerator(
    image_directory = f"{DATA_FOLDER}/test/*_img_*",
    segmentation_directory = f"{DATA_FOLDER}/test/*_lbl_*",
    num_classes = NUM_CLASSES,
    batch_size = BATCH_SIZE * 10,
    augmentation = False
)
test_data_gen = test_data.get_data_generator()
Indexing Image files...
Indexing Segmentation files...
Loaded 1888 images with 1888 segmentations
In [ ]:
 
In [ ]:
import pandas as pd
from tqdm import tqdm

class_index = 1

IoU_df = pd.DataFrame()
FScore_df = pd.DataFrame()
Accuracy_df = pd.DataFrame()

predicted_fscores = []
predicted_ious = []
predicted_accuracies = []

for _ in tqdm(range(len(test_data) // (BATCH_SIZE * 10))):

    x, ground_truth = test_data_gen.__next__()
    
    predicted = model.predict(x)
    #predicted = ground_truth
    

    # ----- calc IoU -----
    iou_predicted = iou_score.__call__(ground_truth, predicted)
    predicted_ious.append(iou_predicted.numpy())

    # ----- calc FScore -----
    fscore_predicted = f_score.__call__(ground_truth, predicted)
    predicted_fscores.append(fscore_predicted.numpy())

    # ----- calc Accuracy -----
    m = tf.keras.metrics.CategoricalAccuracy()
    m.update_state(ground_truth, predicted)
    predicted_accuracies.append(m.result().numpy())
    
FScore_df[f'FScore'] = predicted_fscores
IoU_df[f'IoU'] = predicted_ious
Accuracy_df[f'Accuracy'] = predicted_accuracies

    
# add the two score dataframes horizontally
metrics_df = pd.concat([FScore_df, IoU_df, Accuracy_df], axis=1)
100%|██████████| 10/10 [00:38<00:00,  3.87s/it]
In [ ]:
metrics_df
Out[ ]:
FScore IoU Accuracy
0 0.200322 0.142581 0.695666
1 0.211328 0.150442 0.682157
2 0.175440 0.126439 0.655927
3 0.197279 0.142318 0.665889
4 0.199168 0.140859 0.669912
5 0.179264 0.128246 0.627747
6 0.190417 0.135935 0.692300
7 0.200594 0.144135 0.683633
8 0.185329 0.133341 0.663251
9 0.185098 0.133169 0.678369
In [ ]:
metrics_df.describe()
Out[ ]:
FScore IoU Accuracy
count 10.000000 10.000000 10.000000
mean 0.214698 0.155966 0.686573
std 0.014949 0.012580 0.024465
min 0.201068 0.142520 0.655856
25% 0.205552 0.146631 0.666222
50% 0.211397 0.154810 0.689654
75% 0.215196 0.158643 0.694173
max 0.250587 0.183966 0.734076
In [ ]:
 
In [ ]:
 
In [ ]:
i = 1
In [ ]:
plt.imshow(x[i,:,:])
Out[ ]:
<matplotlib.image.AxesImage at 0x17ed5383508>
In [ ]:
np.argmax(y, axis=-1)[0]
Out[ ]:
array([[21, 21, 21, ..., 17, 17, 17],
       [21, 21, 21, ..., 17, 17, 17],
       [21, 21, 21, ..., 17, 17, 17],
       ...,
       [21, 21, 21, ..., 21, 21, 21],
       [21, 21, 21, ..., 21, 21, 21],
       [21, 21, 21, ..., 21, 21, 21]], dtype=int64)
In [ ]:
y.shape
Out[ ]:
(18, 256, 256, 34)
In [ ]:
np.argmax(y, axis=-1).shape
Out[ ]:
(18, 256, 256)
In [ ]:
plt.imshow(np.argmax(y, axis=-1)[i])
Out[ ]:
<matplotlib.image.AxesImage at 0x1816f440ec8>
In [ ]:
y_pred = model.predict(x)
In [ ]:
seg = y_pred[i]


#undo keras.to_categorical
seg = np.argmax(seg, axis=-1)

plt.imshow(seg)
plt.title('Predicted')
Out[ ]:
Text(0.5, 1.0, 'Predicted')
In [ ]:
seg
Out[ ]:
array([[14, 27, 27, ..., 27, 27, 24],
       [28, 27, 27, ..., 27, 24, 26],
       [28, 27, 27, ..., 27, 27, 10],
       ...,
       [ 8, 27,  8, ...,  8, 26, 26],
       [ 8,  8,  8, ...,  8,  8,  8],
       [ 8, 26,  8, ...,  8, 26, 26]], dtype=int64)
In [ ]:
seg = y[0]


#undo keras.to_categorical
seg = np.argmax(seg, axis=-1)

plt.imshow(seg)
plt.title('GT')
Out[ ]:
Text(0.5, 1.0, 'GT')
In [ ]:
seg
Out[ ]:
array([[21, 21, 21, ..., 21, 21, 21],
       [21, 21, 21, ..., 21, 21, 21],
       [21, 21, 21, ..., 21, 21, 21],
       ...,
       [20, 20, 20, ...,  8,  8,  8],
       [20, 20, 20, ...,  8,  8,  8],
       [20, 20, 20, ...,  8,  8,  8]], dtype=int64)
In [ ]:
test_data = SegmentationDataGenerator(
    image_directory = f"{DATA_FOLDER}/test/*_img_*",
    segmentation_directory = f"{DATA_FOLDER}/test/*_lbl_*",
    num_classes = NUM_CLASSES,
    batch_size = BATCH_SIZE,
    augmentation = False
)
test_data_gen = test_data.get_data_generator()
Indexing Image files...
Indexing Segmentation files...
Loaded 1888 images with 1888 segmentations
In [ ]:
x, y = val_data_gen.__next__()
y_pred = model.predict(x)
In [ ]:
i = 0
In [ ]:
plt.imshow(x[i,:,:])
Out[ ]:
<matplotlib.image.AxesImage at 0x18171433a08>
In [ ]:
y_pred[i]
Out[ ]:
array([[[2.37924120e-04, 2.60944702e-02, 1.73568726e-01, ...,
         9.08670423e-04, 7.29276391e-04, 4.58638743e-03],
        [6.41004590e-05, 2.11885870e-02, 2.36742929e-01, ...,
         3.79066740e-04, 2.37813263e-04, 1.55458332e-03],
        [2.10100570e-05, 1.89635996e-02, 2.48808801e-01, ...,
         1.73576627e-04, 4.41281118e-05, 2.72157398e-04],
        ...,
        [6.33898389e-06, 2.08904594e-03, 1.92259073e-01, ...,
         4.80579547e-05, 4.18057698e-06, 1.62452343e-05],
        [1.74945671e-05, 3.08902655e-03, 1.92678452e-01, ...,
         1.33137932e-04, 1.17306208e-05, 4.36500195e-05],
        [1.66208425e-04, 9.02974792e-03, 2.86121905e-01, ...,
         1.48226309e-03, 7.78151662e-05, 3.47475841e-04]],

       [[1.10291243e-04, 1.60376430e-02, 2.31892943e-01, ...,
         5.52766258e-04, 3.83025821e-04, 2.48698704e-03],
        [1.91299205e-05, 7.27373874e-03, 1.67130619e-01, ...,
         1.72370041e-04, 6.65568805e-05, 3.92284535e-04],
        [9.92745845e-06, 1.30368415e-02, 1.65163949e-01, ...,
         1.48387859e-04, 2.45620267e-05, 1.39388183e-04],
        ...,
        [3.10359752e-07, 4.47466533e-04, 1.08855046e-01, ...,
         2.61829496e-06, 1.69543952e-07, 7.50495303e-07],
        [7.03729825e-07, 7.31097884e-04, 1.05045676e-01, ...,
         1.10481114e-05, 6.22746882e-07, 2.96440180e-06],
        [1.86930647e-05, 2.93650222e-03, 2.65159994e-01, ...,
         2.53717124e-04, 9.63871298e-06, 5.16286673e-05]],

       [[6.96420902e-05, 1.00675095e-02, 2.64521211e-01, ...,
         4.98406647e-04, 1.78893650e-04, 1.10308710e-03],
        [1.23512282e-05, 3.82454530e-03, 1.92650631e-01, ...,
         1.80904826e-04, 3.15343095e-05, 1.87612648e-04],
        [8.64787762e-06, 6.08386938e-03, 1.65827781e-01, ...,
         1.02570863e-04, 8.88793693e-06, 4.09898021e-05],
        ...,
        [1.41707289e-07, 4.08647873e-04, 1.36491641e-01, ...,
         2.00432805e-06, 3.84821952e-08, 1.44224515e-07],
        [4.19946360e-07, 4.39064926e-04, 1.25933960e-01, ...,
         6.34171693e-06, 1.82335398e-07, 8.12267899e-07],
        [1.14160557e-05, 3.41794710e-03, 3.12157333e-01, ...,
         2.32033344e-04, 6.67994163e-06, 3.52795105e-05]],

       ...,

       [[3.82016209e-04, 6.17164478e-04, 9.33961291e-03, ...,
         5.72803337e-03, 2.85618007e-04, 1.31564273e-03],
        [2.02298776e-04, 1.21463614e-04, 7.37279560e-03, ...,
         3.55358585e-03, 1.09617133e-04, 6.32259354e-04],
        [2.08218276e-04, 1.35965791e-04, 7.01842923e-03, ...,
         3.91556229e-03, 9.71399641e-05, 6.02414482e-04],
        ...,
        [1.68975355e-04, 2.49136705e-04, 1.95245340e-03, ...,
         7.57773174e-04, 3.14587844e-04, 2.65584304e-03],
        [2.10364757e-04, 4.93296189e-04, 2.71413452e-03, ...,
         6.21957821e-04, 3.16374557e-04, 2.80790683e-03],
        [3.15245095e-04, 9.73032031e-04, 3.96009162e-03, ...,
         1.00197818e-03, 6.28169626e-04, 5.47387172e-03]],

       [[3.36640078e-04, 8.89856427e-04, 8.06372799e-03, ...,
         7.81655684e-03, 3.75440984e-04, 1.71525462e-03],
        [1.77036854e-04, 1.51029308e-04, 6.32925378e-03, ...,
         3.91690573e-03, 1.08778084e-04, 7.45678728e-04],
        [1.94542401e-04, 1.26093044e-04, 8.65414552e-03, ...,
         3.56414914e-03, 8.20249697e-05, 5.67240757e-04],
        ...,
        [1.62705634e-04, 2.74029910e-04, 2.32026842e-03, ...,
         7.69194274e-04, 2.55506166e-04, 2.34768167e-03],
        [1.91222163e-04, 4.77746653e-04, 2.77529354e-03, ...,
         5.73022640e-04, 2.48549622e-04, 2.32334388e-03],
        [3.19330225e-04, 1.10458606e-03, 3.89013626e-03, ...,
         1.05867069e-03, 6.29469927e-04, 5.27113257e-03]],

       [[3.96613585e-04, 1.76985562e-03, 8.64160247e-03, ...,
         6.06388086e-03, 7.13341637e-04, 3.28961201e-03],
        [3.15421494e-04, 6.87019550e-04, 8.65804590e-03, ...,
         4.65713721e-03, 3.01198510e-04, 1.70413812e-03],
        [3.67474830e-04, 6.94394519e-04, 1.22158052e-02, ...,
         5.41853346e-03, 2.81391956e-04, 1.58412720e-03],
        ...,
        [2.66871328e-04, 7.25262274e-04, 3.72309773e-03, ...,
         1.47962256e-03, 5.63227630e-04, 4.59408760e-03],
        [3.00744665e-04, 1.22890319e-03, 4.54558479e-03, ...,
         1.54438568e-03, 6.41048595e-04, 4.90531698e-03],
        [4.09527216e-04, 1.84808567e-03, 5.26570017e-03, ...,
         1.91820867e-03, 1.11225853e-03, 7.65809882e-03]]], dtype=float32)
In [ ]:
seg = y_pred[i]


#undo keras.to_categorical
seg = np.argmax(seg, axis=-1)

plt.imshow(seg)
plt.title('Predicted')
Out[ ]:
Text(0.5, 1.0, 'Predicted')
In [ ]:
seg = y[i]


#undo keras.to_categorical
seg = np.argmax(seg, axis=-1)

plt.imshow(seg)
plt.title('GT')
Out[ ]:
Text(0.5, 1.0, 'GT')
In [ ]: